Commit 0019417 (parent: 3f0dc42): Update app.py
app.py CHANGED
@@ -95,8 +95,14 @@ for message in chat_history:
 # User input
 user_input = st.text_area("", height=5, key="user_input")
 
-#
-uploaded_files = st.
+# File uploader
+uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
+
+# If files are uploaded, open and display them
+if uploaded_files:
+    for uploaded_file in uploaded_files:
+        image = Image.open(uploaded_file)
+        st.image(image)
 
 # Run button
 run_button = st.button("Run", key="run_button")
@@ -128,12 +134,67 @@ st.markdown("""
 </div>
 """, unsafe_allow_html=True)
 
-#
-if
-
-st.
-
-
-
-
-
+# Save chat history to a text file
+if download_button:
+    chat_text = "\n".join([f"{r.title()}: {t}" for r, t in chat_history])
+    st.download_button(
+        label="Download chat history",
+        data=chat_text,
+        file_name="chat_history.txt",
+        mime="text/plain"
+    )
+
+# Generate model response
+if run_button or user_input:
+    if user_input:
+        chat_history.append({"role": role, "parts": [{"text": user_input}]})
+        st.session_state["user_input"] = ""
+    if role == "user":
+
+        # Model code
+        model = genai.GenerativeModel(
+            model_name=model_name,
+            generation_config=generation_config,
+            safety_settings=safety_settings
+        )
+
+        if uploaded_files:
+            # Preprocess the uploaded files and convert them to image_parts
+            image_parts = []
+            for uploaded_file in uploaded_files:
+                image = Image.open(uploaded_file).convert('RGB')
+                image_parts.append({
+                    "mime_type": uploaded_file.type,
+                    "data": uploaded_file.read()
+                })
+                # Display the uploaded images
+                st.image(image)
+
+            # Add the user input to the prompt_parts
+            prompt_parts = [
+                user_input,
+            ] + image_parts
+
+            # Use gemini-pro-vision model to generate the response
+            response = model.generate_content(prompt_parts, stream=True)
+        else:
+            # Use gemini-pro model to generate the response
+            response = model.generate_content(chat_history, stream=True)
+
+        # Streaming effect
+        chat_history.append({"role": "model", "parts": [{"text": ""}]})
+        progress_bar.progress(0)
+        for chunk in response:
+            for i in range(0, len(chunk.text), 10):
+                section = chunk.text[i:i + 10]
+                chat_history[-1]["parts"][0]["text"] += section
+                progress = min((i + 10) / len(chunk.text), 1.0)
+                progress_bar.progress(progress)
+                time.sleep(0.01)
+                st.experimental_rerun()
+        progress_bar.progress(1.0)
+
+    st.session_state["chat_history"] = chat_history
+    st.session_state["uploaded_files"] = None
+
+    st.experimental_rerun()
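
For reference, the multimodal request that the added block assembles (text prompt plus inline image parts, streamed back in chunks) can be exercised on its own with the same google-generativeai and Pillow APIs. This is a minimal sketch and not part of the commit: the build_image_parts helper, the GOOGLE_API_KEY environment variable, and the "gemini-pro-vision" default are illustrative assumptions.

import os
import google.generativeai as genai

# Configure the client; app.py presumably does this elsewhere.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

def build_image_parts(uploaded_files):
    # Mirror the diff: each image becomes an inline blob of MIME type plus raw bytes.
    # uploaded_files are Streamlit UploadedFile-like objects exposing .type and .read().
    return [{"mime_type": f.type, "data": f.read()} for f in uploaded_files]

def generate_reply(user_input, uploaded_files=None, model_name="gemini-pro-vision"):
    model = genai.GenerativeModel(model_name=model_name)
    if uploaded_files:
        # Vision-style request: the text prompt followed by the image parts.
        response = model.generate_content([user_input] + build_image_parts(uploaded_files), stream=True)
    else:
        # Text-only request, as in the else branch of the diff.
        response = model.generate_content([user_input], stream=True)
    # Concatenate the streamed chunks into the final reply text.
    return "".join(chunk.text for chunk in response)

In the app itself the streamed chunks are instead appended to chat_history piece by piece to drive the progress bar and chat display.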