ziyadsuper2017 committed on
Commit
3682f26
·
1 Parent(s): 0019417

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +96 -110
app.py CHANGED
@@ -7,23 +7,25 @@ import sqlite3
7
  import google.generativeai as genai
8
  import streamlit as st
9
  from PIL import Image
 
 
10
 
11
  # Database setup
12
- conn = sqlite3.connect('chat_history.db')
13
  c = conn.cursor()
14
 
15
  c.execute('''
16
- CREATE TABLE IF NOT EXISTS history
17
- (role TEXT, message TEXT)
18
- ''')
19
 
20
  # Generative AI setup
21
- api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
22
  genai.configure(api_key=api_key)
23
 
24
  generation_config = {
25
- "temperature": 0.9,
26
- "max_output_tokens": 3000
27
  }
28
 
29
  safety_settings = []
@@ -35,74 +37,77 @@ st.set_page_config(page_title="Chatbot", page_icon="🤖")
35
  st.markdown("""
36
  <style>
37
  .container {
38
- display: flex;
39
  }
40
  .logo-text {
41
- font-weight:700 !important;
42
- font-size:50px !important;
43
- color: #f9a01b !important;
44
- padding-top: 75px !important;
45
  }
46
  .logo-img {
47
- float:right;
48
  }
49
  </style>
50
  <div class="container">
51
- <p class="logo-text">Chatbot</p>
52
- <img class="logo-img" src="https://media.roboflow.com/spaces/gemini-icon.png" width=120 height=120>
53
  </div>
54
  """, unsafe_allow_html=True)
55
 
56
  # Sidebar
57
  st.sidebar.title("Parameters")
58
  temperature = st.sidebar.slider(
59
- "Temperature",
60
- min_value=0.0,
61
- max_value=1.0,
62
- value=0.9,
63
- step=0.01,
64
- help="Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results."
65
  )
66
  max_output_tokens = st.sidebar.slider(
67
- "Token limit",
68
- min_value=1,
69
- max_value=2048,
70
- value=3000,
71
- step=1,
72
- help="Token limit determines the maximum amount of text output from one prompt. A token is approximately four characters. The default value is 2048."
73
  )
74
  st.sidebar.title("Model")
75
  model_name = st.sidebar.selectbox(
76
- "Select a model",
77
- options=["gemini-pro", "gemini-pro-vision"],
78
- index=0,
79
- help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images."
80
  )
81
 
 
 
 
82
  # Chat history
83
  st.title("Chatbot")
84
  chat_history = st.session_state.get("chat_history", [])
85
 
86
  if len(chat_history) % 2 == 0:
87
- role = "user"
88
  else:
89
- role = "model"
90
 
91
  for message in chat_history:
92
- r, t = message["role"], message["parts"][0]["text"]
93
- st.markdown(f"**{r.title()}:** {t}")
94
-
95
  # User input
96
- user_input = st.text_area("", height=5, key="user_input")
97
 
98
  # File uploader
99
  uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
100
 
101
  # If files are uploaded, open and display them
102
  if uploaded_files:
103
- for uploaded_file in uploaded_files:
104
- image = Image.open(uploaded_file)
105
- st.image(image)
106
 
107
  # Run button
108
  run_button = st.button("Run", key="run_button")
@@ -120,81 +125,62 @@ progress_bar = st.progress(0)
120
  st.markdown("""
121
  <style>
122
  .footer {
123
- position: fixed;
124
- left: 0;
125
- bottom: 0;
126
- width: 100%;
127
- background-color: #f9a01b;
128
- color: white;
129
- text-align: center;
130
  }
131
  </style>
132
  <div class="footer">
133
- <p>Made with Streamlit and Google Generative AI</p>
134
  </div>
135
  """, unsafe_allow_html=True)
136
 
 
 
 
 
 
 
 
 
137
  # Save chat history to a text file
138
  if download_button:
139
- chat_text = "\n".join([f"{r.title()}: {t}" for r, t in chat_history])
140
- st.download_button(
141
- label="Download chat history",
142
- data=chat_text,
143
- file_name="chat_history.txt",
144
- mime="text/plain"
145
- )
146
-
147
- # Generate model response
148
- if run_button or user_input:
149
- if user_input:
150
- chat_history.append({"role": role, "parts": [{"text": user_input}]})
151
- st.session_state["user_input"] = ""
152
- if role == "user":
153
-
154
- # Model code
155
- model = genai.GenerativeModel(
156
- model_name=model_name,
157
- generation_config=generation_config,
158
- safety_settings=safety_settings
159
- )
160
-
161
- if uploaded_files:
162
- # Preprocess the uploaded files and convert them to image_parts
163
- image_parts = []
164
- for uploaded_file in uploaded_files:
165
- image = Image.open(uploaded_file).convert('RGB')
166
- image_parts.append({
167
- "mime_type": uploaded_file.type,
168
- "data": uploaded_file.read()
169
- })
170
- # Display the uploaded images
171
- st.image(image)
172
-
173
- # Add the user input to the prompt_parts
174
- prompt_parts = [
175
- user_input,
176
- ] + image_parts
177
-
178
- # Use gemini-pro-vision model to generate the response
179
- response = model.generate_content(prompt_parts, stream=True)
180
- else:
181
- # Use gemini-pro model to generate the response
182
- response = model.generate_content(chat_history, stream=True)
183
-
184
- # Streaming effect
185
- chat_history.append({"role": "model", "parts": [{"text": ""}]})
186
- progress_bar.progress(0)
187
- for chunk in response:
188
- for i in range(0, len(chunk.text), 10):
189
- section = chunk.text[i:i + 10]
190
- chat_history[-1]["parts"][0]["text"] += section
191
- progress = min((i + 10) / len(chunk.text), 1.0)
192
- progress_bar.progress(progress)
193
- time.sleep(0.01)
194
- st.experimental_rerun()
195
- progress_bar.progress(1.0)
196
-
197
- st.session_state["chat_history"] = chat_history
198
- st.session_state["uploaded_files"] = None
199
-
200
- st.experimental_rerun()
 
7
  import google.generativeai as genai
8
  import streamlit as st
9
  from PIL import Image
10
+ import requests
11
+ from io import BytesIO
12
 
13
  # Database setup
14
+ conn = sqlite3.connect('chat_history.db')
15
  c = conn.cursor()
16
 
17
  c.execute('''
18
+ CREATE TABLE IF NOT EXISTS history
19
+ (role TEXT, message TEXT)
20
+ ''')
21
 
22
  # Generative AI setup
23
+ api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
24
  genai.configure(api_key=api_key)
25
 
26
  generation_config = {
27
+ "temperature": 0.9,
28
+ "max_output_tokens": 3000
29
  }
30
 
31
  safety_settings = []
 
37
  st.markdown("""
38
  <style>
39
  .container {
40
+ display: flex;
41
  }
42
  .logo-text {
43
+ font-weight:700 !important;
44
+ font-size:50px !important;
45
+ color: #f9a01b !important;
46
+ padding-top: 75px !important;
47
  }
48
  .logo-img {
49
+ float:right;
50
  }
51
  </style>
52
  <div class="container">
53
+ <p class="logo-text">Chatbot</p>
54
+ <img class="logo-img" src="https://media.roboflow.com/spaces/gemini-icon.png" width=120 height=120>
55
  </div>
56
  """, unsafe_allow_html=True)
57
 
58
  # Sidebar
59
  st.sidebar.title("Parameters")
60
  temperature = st.sidebar.slider(
61
+ "Temperature",
62
+ min_value=0.0,
63
+ max_value=1.0,
64
+ value=0.9,
65
+ step=0.01,
66
+ help="Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results."
67
  )
68
  max_output_tokens = st.sidebar.slider(
69
+ "Token limit",
70
+ min_value=1,
71
+ max_value=2048,
72
+ value=3000,
73
+ step=1,
74
+ help="Token limit determines the maximum amount of text output from one prompt. A token is approximately four characters. The default value is 2048."
75
  )
76
  st.sidebar.title("Model")
77
  model_name = st.sidebar.selectbox(
78
+ "Select a model",
79
+ options=["gemini-pro", "gemini-pro-vision"],
80
+ index=0,
81
+ help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images."
82
  )
83
 
84
+ # Initialize user_input in session state
85
+ st.session_state["user_input"] = ""
86
+
87
  # Chat history
88
  st.title("Chatbot")
89
  chat_history = st.session_state.get("chat_history", [])
90
 
91
  if len(chat_history) % 2 == 0:
92
+ role = "user"
93
  else:
94
+ role = "model"
95
 
96
  for message in chat_history:
97
+ r, t = message["role"], message["parts"][0]["text"]
98
+ st.markdown(f"**{r.title()}:** {t}")
99
+
100
  # User input
101
+ user_input = st.text_area("", height=5, key="user_input")
102
 
103
  # File uploader
104
  uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
105
 
106
  # If files are uploaded, open and display them
107
  if uploaded_files:
108
+ for uploaded_file in uploaded_files:
109
+ image = Image.open(uploaded_file)
110
+ st.image(image)
111
 
112
  # Run button
113
  run_button = st.button("Run", key="run_button")
 
125
  st.markdown("""
126
  <style>
127
  .footer {
128
+ position: fixed;
129
+ left: 0;
130
+ bottom: 0;
131
+ width: 100%;
132
+ background-color: #f9a01b;
133
+ color: white;
134
+ text-align: center;
135
  }
136
  </style>
137
  <div class="footer">
138
+ <p>Made with Streamlit and Google Generative AI</p>
139
  </div>
140
  """, unsafe_allow_html=True)
141
 
142
+ # Clear chat history and image uploader
143
+ if clear_button:
144
+ chat_history.clear()
145
+ st.session_state["chat_history"] = chat_history
146
+ st.session_state["user_input"] = ""
147
+ st.session_state["uploaded_files"] = None
148
+ st.experimental_rerun()
149
+
150
  # Save chat history to a text file
151
  if download_button:
152
+ chat_text = "\n".join([f"{r.title()}: {t}" for r, t in chat_history])
153
+ st.download_button(
154
+ label="Download chat history",
155
+ data=chat_text,
156
+ file_name="chat_history.txt",
157
+ mime="text/plain"
158
+ )
159
+
160
+ # Generate model response
161
+ if model_name == "gemini-pro":
162
+ response = genai.generate(
163
+ prompt=user_input,
164
+ temperature=temperature,
165
+ max_output_tokens=max_output_tokens
166
+ )
167
+ elif model_name == "gemini-pro-vision":
168
+ response = genai.generate(
169
+ prompt=user_input,
170
+ temperature=temperature,
171
+ max_output_tokens=max_output_tokens,
172
+ images=uploaded_files
173
+ )
174
+
175
+ # Update chat history
176
+ chat_history.append({"role": "model", "parts": [{"text": response}]})
177
+ st.session_state["chat_history"] = chat_history
178
+
179
+ # Update progress bar
180
+ progress_bar.progress(1)
181
+
182
+ # Clear user input
183
+ st.session_state["user_input"] = ""
184
+
185
+ # Rerun the app
186
+ st.experimental_rerun()