ziyadsuper2017 committed on
Commit
8239be3
·
1 Parent(s): 837873a

Reverting back to the last code - just added the multiple file upload option

Browse files
Files changed (1) hide show
  1. app.py +32 -26
app.py CHANGED
@@ -78,29 +78,35 @@ conn.close()
78
  # Separate section for image uploading
79
  st.title("Image Description Generator")
80
 
81
- uploaded_file = st.file_uploader("Upload an image here", type=["png", "jpg", "jpeg"])
82
-
83
- # Text input for asking questions about the image
84
- image_question = st.text_input("Ask something about the image:")
85
-
86
- if uploaded_file and image_question:
87
- image_parts = [
88
- {
89
- "mime_type": uploaded_file.type,
90
- "data": uploaded_file.read()
91
- },
92
- ]
93
-
94
- prompt_parts = [
95
- image_question,
96
- image_parts[0],
97
- ]
98
-
99
- model = genai.GenerativeModel(
100
- model_name="gemini-pro-vision",
101
- generation_config=generation_config,
102
- safety_settings=safety_settings
103
- )
104
-
105
- response = model.generate_content(prompt_parts)
106
- st.markdown(f"**Model's answer:** {response.text}")
 
 
 
 
 
 
 
78
  # Separate section for image uploading
79
  st.title("Image Description Generator")
80
 
81
+ # Change the file_uploader to accept multiple files
82
+ uploaded_files = st.file_uploader("Upload one or more images here", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
83
+
84
+ # Loop through the uploaded files and display them
85
+ for uploaded_file in uploaded_files:
86
+ # Display the image
87
+ st.image(uploaded_file)
88
+
89
+ # Text input for asking questions about the image
90
+ image_question = st.text_input(f"Ask something about {uploaded_file.name}:")
91
+
92
+ if image_question:
93
+ image_parts = [
94
+ {
95
+ "mime_type": uploaded_file.type,
96
+ "data": uploaded_file.read()
97
+ },
98
+ ]
99
+
100
+ prompt_parts = [
101
+ image_question,
102
+ image_parts[0],
103
+ ]
104
+
105
+ model = genai.GenerativeModel(
106
+ model_name="gemini-pro-vision",
107
+ generation_config=generation_config,
108
+ safety_settings=safety_settings
109
+ )
110
+
111
+ response = model.generate_content(prompt_parts)
112
+ st.markdown(f"**Model's answer:** {response.text}")