Update app.py
app.py CHANGED
@@ -5,10 +5,10 @@ import base64
 import uuid
 from gtts import gTTS
 import google.generativeai as genai
-from io import BytesIO
+from io import BytesIO
 
 # Set your API key
-api_key = "
+api_key = "AIzaSyAHD0FwX-Ds6Y3eI-i5Oz7IdbJqR6rN7pg" # Replace with your actual API key
 genai.configure(api_key=api_key)
 
 # Configure the generative AI model
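The hunk above hardcodes the Gemini API key directly in app.py. A common alternative, sketched below, is to read the key from Streamlit's secrets store or an environment variable; the GOOGLE_API_KEY name and the load_api_key helper are illustrative assumptions, not something this commit defines.

import os
import streamlit as st
import google.generativeai as genai

def load_api_key():
    # Illustrative helper, not part of the commit: prefer Streamlit's secrets
    # store, then fall back to an environment variable.
    try:
        return st.secrets["GOOGLE_API_KEY"]
    except Exception:
        # No secrets file, or the key is not set; try the environment instead.
        return os.getenv("GOOGLE_API_KEY")

api_key = load_api_key()
if not api_key:
    st.error("No Gemini API key configured.")
    st.stop()
genai.configure(api_key=api_key)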
@@ -45,6 +45,9 @@ if 'file_uploader_key' not in st.session_state:
 
 st.title("Gemini Chatbot")
 
+# Model Selection Dropdown
+selected_model = st.selectbox("Select a Gemini 1.5 model:", ["gemini-1.5-flash-latest", "gemini-1.5-pro-latest"])
+
 # Helper functions for image processing and chat history management
 def get_image_base64(image):
     image = image.convert("RGB")
@@ -110,17 +113,23 @@ def send_message():
             "parts": [{"mime_type": uploaded_file.type, "data": base64_image}]
         })
 
-    # Determine if vision model should be used
+    # Determine if vision model should be used
     use_vision_model = any(part.get('mime_type') == 'image/jpeg' for part in prompt_parts)
 
-    #
-    model_name =
+    # Use the selected model
+    model_name = selected_model
+    if use_vision_model and "pro" not in model_name:
+        st.warning(f"The selected model ({model_name}) does not support image inputs. Choose a 'pro' model for image capabilities.")
+        return
+
     model = genai.GenerativeModel(
         model_name=model_name,
         generation_config=generation_config,
         safety_settings=safety_settings
     )
+
     chat_history_str = "\n".join(prompts)
+
     if use_vision_model:
         # Include text and images for vision model
         generated_prompt = {"role": "user", "parts": prompt_parts}
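Taken together, the dropdown added in the second hunk and the model gate added in the third amount to the flow sketched below. The build_model helper and the has_image flag are illustrative names only, and generation_config / safety_settings are assumed to be defined elsewhere in app.py, outside the hunks shown above.

import streamlit as st
import google.generativeai as genai

# Model selection dropdown, as added by the commit.
selected_model = st.selectbox(
    "Select a Gemini 1.5 model:",
    ["gemini-1.5-flash-latest", "gemini-1.5-pro-latest"],
)

def build_model(model_name, has_image, generation_config=None, safety_settings=None):
    # Mirror the commit's gate: if images are attached but the chosen model is
    # not a "pro" variant, warn and return None instead of building a model.
    if has_image and "pro" not in model_name:
        st.warning(
            f"The selected model ({model_name}) does not support image inputs. "
            "Choose a 'pro' model for image capabilities."
        )
        return None
    return genai.GenerativeModel(
        model_name=model_name,
        generation_config=generation_config,
        safety_settings=safety_settings,
    )

# Inside send_message(), has_image corresponds to the commit's any(...) check
# over prompt_parts for an image mime type.
model = build_model(selected_model, has_image=False)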