Commit dfdbfa8 · Update app.py
Parent(s): 1b75933

app.py CHANGED
@@ -3,38 +3,41 @@ from PIL import Image
 import io
 import base64
 
-# Placeholder for google.generativeai package
 import google.generativeai as genai
 
 # Configure the API key (should be set as an environment variable or secure storage in production)
-api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
+api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM" # Replace with your actual API key
 genai.configure(api_key=api_key)
 
-generation_config =
-
-
-
+generation_config = genai.GenerationConfig(
+    temperature=0.9,
+    max_output_tokens=3000
+)
 
 safety_settings = []
 
-# Initialize session state
-if
-st.session_state
+# Initialize session state for chat history
+if 'chat_history' not in st.session_state:
+    st.session_state['chat_history'] = []
 
 # UI layout
 st.title("Gemini Chatbot")
 
 # Display the chat history
-for
+for message in st.session_state['chat_history']:
     role, text = message
-
-    st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True, key=key)
+    st.markdown(f"**{role.title()}:** {text}")
 
 # Text input for the user to send messages
 user_input = st.text_input("Enter your message here:", key="user_input")
 
 # File uploader for images
-uploaded_files = st.file_uploader(
+uploaded_files = st.file_uploader(
+    "Upload images:",
+    type=["png", "jpg", "jpeg"],
+    accept_multiple_files=True,
+    key="file_uploader"
+)
 
 # Function to convert image to base64
 def get_image_base64(image):
@@ -45,47 +48,48 @@ def get_image_base64(image):
 
 # When the 'Send' button is clicked, process the input and generate a response
 if st.button("Send", key="send_button"):
-
-
-    st.session_state
-
+    if user_input:
+        # Save user input to the chat history
+        st.session_state['chat_history'].append(("User", user_input))
+
+        # Assuming the generative model accepts text input
+        prompts = [{"role": "user", "parts": [{"text": user_input}]}]
+
+        # Use the appropriate Gemini model based on the inputs
+        model_name = 'gemini-pro-vision' if uploaded_files else 'gemini-pro'
+        model = genai.GenerativeModel(
+            model_name=model_name,
+            generation_config=generation_config,
+            safety_settings=safety_settings
+        )
+
+        # Generate the response
+        response = model.generate_content(prompts)
+        response_text = response.text if hasattr(response, "text") else "No response text found."
+
+        # Save the model response to the chat history
+        st.session_state['chat_history'].append(("Gemini", response_text))
+
     # Process and save uploaded images to the chat history
-    image_parts = []
     if uploaded_files:
         for uploaded_file in uploaded_files:
            bytes_data = uploaded_file.read()
            image = Image.open(io.BytesIO(bytes_data))
            image_base64 = get_image_base64(image)
-
-
-
-            }
-
-
-
-
-
-
-
-
-
-
-
-            generation_config=generation_config,
-            safety_settings=safety_settings
-        )
-
-    # Generate the response
-    response = model.generate_content(prompts)
-
-    # Corrected response handling
-    response_text = response.text if hasattr(response, "text") else "No response text found."
-
-    # Save the model response to the chat history
-    st.session_state.chat_history.append(("model", response_text))
-
-    # Redisplay the updated chat history
-    for idx, message in enumerate(st.session_state.chat_history):
+            st.session_state['chat_history'].append(("User", f"Uploaded image: {uploaded_file.name}"))
+
+            # Assuming the generative model accepts image input
+            image_prompt = {"role": "user", "parts": [{"mime_type": uploaded_file.type, "data": image_base64}]}
+            response = model.generate_content([image_prompt])
+            response_text = response.text if hasattr(response, "text") else "No response text found."
+
+            # Save the model response to the chat history
+            st.session_state['chat_history'].append(("Gemini", response_text))
+
+    # Clear the input box after sending the message
+    st.session_state['user_input'] = ""
+
+    # Re-display the chat history to include the new messages
+    for message in st.session_state['chat_history']:
         role, text = message
-
-        st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True, key=key)
+        st.markdown(f"**{role.title()}:** {text}")
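
The body of get_image_base64 lies outside both hunks, so it is unchanged by this commit and not shown above. For reference, a minimal sketch of what such a helper typically looks like, assuming the image is re-encoded as PNG before base64 encoding (the actual implementation in app.py may differ):

import base64
import io

from PIL import Image


def get_image_base64(image: Image.Image) -> str:
    # Serialize the PIL image into an in-memory PNG buffer, then base64-encode the raw bytes.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")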
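
The new code builds prompts as a list of role/parts dictionaries and passes them to GenerativeModel.generate_content. A minimal, self-contained sketch of that call pattern with the google-generativeai package (the prompt text, the placeholder key, and the printed fallback are illustrative, not from the commit):

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key for illustration

model = genai.GenerativeModel(
    model_name="gemini-pro",
    generation_config=genai.GenerationConfig(temperature=0.9, max_output_tokens=3000),
    safety_settings=[],
)

# Each message is a dict with a role and a list of parts; a text part is {"text": ...}.
prompts = [{"role": "user", "parts": [{"text": "Hello, Gemini!"}]}]
response = model.generate_content(prompts)

# response.text holds the generated text when the model returns a text candidate.
print(response.text if hasattr(response, "text") else "No response text found.")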
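
One caveat with the new code: Streamlit raises a StreamlitAPIException when a widget's session-state key is reassigned after that widget has been instantiated in the same run, which affects the st.session_state['user_input'] = "" line inside the button handler. A common pattern, sketched below with a hypothetical clear_input callback and last_message key (not part of this commit), is to reset the key from an on_click callback, which runs before the script re-executes:

import streamlit as st

def clear_input():
    # Callbacks run before the next script execution, so the widget key can be reset here.
    st.session_state["last_message"] = st.session_state["user_input"]
    st.session_state["user_input"] = ""

user_input = st.text_input("Enter your message here:", key="user_input")
st.button("Send", key="send_button", on_click=clear_input)

if st.session_state.get("last_message"):
    st.markdown(f"**User:** {st.session_state['last_message']}")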