ziyadsuper2017 committed on
Commit
18bbff5
·
1 Parent(s): b557897

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -27
app.py CHANGED
@@ -3,7 +3,7 @@ from PIL import Image
3
  import io
4
  import base64
5
 
6
-
7
  import google.generativeai as genai
8
 
9
  # Configure the API key (should be set as an environment variable or secure storage in production)
@@ -40,7 +40,7 @@ def get_image_base64(image):
40
  buffered = io.BytesIO()
41
  image.save(buffered, format="JPEG")
42
  img_str = base64.b64encode(buffered.getvalue()).decode()
43
- return f"data:image/jpeg;base64,{img_str}"
44
 
45
  # When the 'Send' button is clicked, process the input and generate a response
46
  if st.button("Send"):
@@ -50,38 +50,35 @@ if st.button("Send"):
50
 
51
  # Process and save uploaded images to the chat history
52
  image_parts = []
53
- for uploaded_file in uploaded_files:
54
- bytes_data = uploaded_file.read()
55
- image = Image.open(io.BytesIO(bytes_data))
56
- image_base64 = get_image_base64(image)
57
- image_parts.append({
58
- "mime_type": uploaded_file.type,
59
- "data": image_base64
60
- })
61
- st.session_state.chat_history.append(("user", f"Uploaded image: {uploaded_file.name}"))
 
62
 
63
  # Prepare the prompts for the model
64
- prompts = []
65
- if user_input.strip():
66
- prompts.append({"role": "user", "parts": [{"text": user_input}]})
67
  for image_part in image_parts:
68
  prompts.append({"role": "user", "parts": [image_part]})
69
 
 
 
 
 
 
 
 
 
70
  # Generate the response
71
- if image_parts:
72
- model = genai.GenerativeModel(
73
- model_name='gemini-pro-vision',
74
- generation_config=generation_config,
75
- safety_settings=safety_settings
76
- )
77
- else:
78
- model = genai.GenerativeModel(
79
- model_name='gemini-pro',
80
- generation_config=generation_config,
81
- safety_settings=safety_settings
82
- )
83
  response = model.generate_content(prompts)
84
- response_text = response['text']
 
 
85
 
86
  # Save the model response to the chat history
87
  st.session_state.chat_history.append(("model", response_text))
 
3
  import io
4
  import base64
5
 
6
+ # Placeholder for google.generativeai package
7
  import google.generativeai as genai
8
 
9
  # Configure the API key (should be set as an environment variable or secure storage in production)
 
40
  buffered = io.BytesIO()
41
  image.save(buffered, format="JPEG")
42
  img_str = base64.b64encode(buffered.getvalue()).decode()
43
+ return img_str
44
 
45
  # When the 'Send' button is clicked, process the input and generate a response
46
  if st.button("Send"):
 
50
 
51
  # Process and save uploaded images to the chat history
52
  image_parts = []
53
+ if uploaded_files:
54
+ for uploaded_file in uploaded_files:
55
+ bytes_data = uploaded_file.read()
56
+ image = Image.open(io.BytesIO(bytes_data))
57
+ image_base64 = get_image_base64(image)
58
+ image_parts.append({
59
+ "mime_type": uploaded_file.type,
60
+ "data": image_base64
61
+ })
62
+ st.session_state.chat_history.append(("user", f"Uploaded image: {uploaded_file.name}"))
63
 
64
  # Prepare the prompts for the model
65
+ prompts = [{"role": "user", "parts": [{"text": user_input}]}] if user_input.strip() else []
 
 
66
  for image_part in image_parts:
67
  prompts.append({"role": "user", "parts": [image_part]})
68
 
69
+ # Use the appropriate Gemini model based on the inputs
70
+ model_name = 'gemini-pro-vision' if uploaded_files else 'gemini-pro'
71
+ model = genai.GenerativeModel(
72
+ model_name=model_name,
73
+ generation_config=generation_config,
74
+ safety_settings=safety_settings
75
+ )
76
+
77
  # Generate the response
 
 
 
 
 
 
 
 
 
 
 
 
78
  response = model.generate_content(prompts)
79
+
80
+ # Corrected response handling
81
+ response_text = response.text if hasattr(response, "text") else "No response text found."
82
 
83
  # Save the model response to the chat history
84
  st.session_state.chat_history.append(("model", response_text))