ziyadsuper2017 committed on
Commit
3f0dc42
·
1 Parent(s): 058b92f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -72
app.py CHANGED
@@ -1,9 +1,9 @@
1
  import os
2
  import time
3
  import uuid
4
- import sqlite3
5
  from typing import List, Tuple, Optional, Dict, Union
6
 
 
7
  import google.generativeai as genai
8
  import streamlit as st
9
  from PIL import Image
@@ -78,13 +78,6 @@ model_name = st.sidebar.selectbox(
78
  index=0,
79
  help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images."
80
  )
81
- model_info = st.sidebar.expander("Model info", expanded=False)
82
- with model_info:
83
- st.markdown(f"""
84
- - Model name: {model_name}
85
- - Model size: {genai.get_model_size(model_name)}
86
- - Model description: {genai.get_model_description(model_name)}
87
- """)
88
 
89
  # Chat history
90
  st.title("Chatbot")
@@ -143,67 +136,4 @@ if clear_button:
143
  st.session_state["uploaded_files"] = None
144
  st.experimental_rerun()
145
 
146
- # Save chat history to a text file
147
- if download_button:
148
- chat_text = "\n".join([f"{r.title()}: {t}" for r, t in chat_history])
149
- st.download_button(
150
- label="Download chat history",
151
- data=chat_text,
152
- file_name="chat_history.txt",
153
- mime="text/plain"
154
- )
155
-
156
- # Generate model response
157
- if run_button or user_input:
158
- if user_input:
159
- chat_history.append({"role": role, "parts": [{"text": user_input}]})
160
- st.session_state["user_input"] = ""
161
- if role == "user":
162
-
163
- # Model code
164
- model = genai.GenerativeModel(
165
- model_name=model_name,
166
- generation_config=generation_config,
167
- safety_settings=safety_settings
168
- )
169
-
170
- if uploaded_files:
171
- # Preprocess the uploaded images and convert them to image_parts
172
- image_parts = []
173
- for uploaded_file in uploaded_files:
174
- image = Image.open(uploaded_file).convert('RGB')
175
- image_parts.append({
176
- "mime_type": uploaded_file.type,
177
- "data": uploaded_file.read()
178
- })
179
- # Display the uploaded images
180
- st.image(image)
181
-
182
- # Add the user input to the prompt_parts
183
- prompt_parts = [
184
- user_input,
185
- ] + image_parts
186
-
187
- # Use gemini-pro-vision model to generate the response
188
- response = model.generate_content(prompt_parts, stream=True)
189
- else:
190
- # Use gemini-pro model to generate the response
191
- response = model.generate_content(chat_history, stream=True)
192
-
193
- # Streaming effect
194
- chat_history.append({"role": "model", "parts": [{"text": ""}]})
195
- progress_bar.progress(0)
196
- for chunk in response:
197
- for i in range(0, len(chunk.text), 10):
198
- section = chunk.text[i:i + 10]
199
- chat_history[-1]["parts"][0]["text"] += section
200
- progress = min((i + 10) / len(chunk.text), 1.0)
201
- progress_bar.progress(progress)
202
- time.sleep(0.01)
203
- st.experimental_rerun()
204
- progress_bar.progress(1.0)
205
-
206
- st.session_state["chat_history"] = chat_history
207
- st.session_state["uploaded_files"] = None
208
-
209
- st.experimental_rerun()
 
1
  import os
2
  import time
3
  import uuid
 
4
  from typing import List, Tuple, Optional, Dict, Union
5
 
6
+ import sqlite3
7
  import google.generativeai as genai
8
  import streamlit as st
9
  from PIL import Image
 
78
  index=0,
79
  help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images."
80
  )
 
 
 
 
 
 
 
81
 
82
  # Chat history
83
  st.title("Chatbot")
 
136
  st.session_state["uploaded_files"] = None
137
  st.experimental_rerun()
138
 
139
+ # Save chat history to a text file