ziyadsuper2017 committed on
Commit 9c3f46e · 1 Parent(s): 6ae7b4c

Update app.py

Files changed (1)
  1. app.py +30 -24
app.py CHANGED
@@ -3,12 +3,13 @@ from PIL import Image
 import io
 import base64
 import uuid
+import os

 # Assuming google.generativeai as genai is the correct import based on your description
 import google.generativeai as genai

 # Configure the API key (should be set as an environment variable or secure storage in production)
-api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM" # Replace with your actual API key
+api_key = os.getenv('AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM') # Replace with your actual API key
 genai.configure(api_key=api_key)

 generation_config = genai.GenerationConfig(
@@ -18,7 +19,7 @@ generation_config = genai.GenerationConfig(

 safety_settings = []

-# Initialize session state for chat history
+# Initialize session state for chat history and file uploader key
 if 'chat_history' not in st.session_state:
     st.session_state['chat_history'] = []
 if 'file_uploader_key' not in st.session_state:
@@ -40,30 +41,34 @@ def send_message():
     user_input = st.session_state.user_input
     uploaded_files = st.session_state.uploaded_files

+    text_prompts = []
+    image_prompts = []
+
+    # Process text input for multi-turn conversation
     if user_input:
-        # Send text to the gemini-pro model for text-based conversation
-        text_prompt = {"role": "user", "parts": [{"text": user_input}]}
-        st.session_state['chat_history'].append(text_prompt)
+        text_prompts.append({"role": "user", "parts": [{"text": user_input}]})
+        st.session_state['chat_history'].append({"role": "user", "parts": [{"text": user_input}]})

+    # Process uploaded images for single-turn conversation
+    if uploaded_files:
+        for uploaded_file in uploaded_files:
+            image = Image.open(uploaded_file).convert("RGB") # Ensure image is in RGB
+            image_base64 = get_image_base64(image)
+            image_prompts.append({"role": "user", "parts": [{"mime_type": uploaded_file.type, "data": image_base64}]})
+
+    # Generate text response if text input is provided
+    if text_prompts:
         model = genai.GenerativeModel(
             model_name='gemini-pro',
             generation_config=generation_config,
             safety_settings=safety_settings
         )
-        response = model.generate_content([text_prompt])
+        response = model.generate_content(st.session_state['chat_history'])
         response_text = response.text if hasattr(response, "text") else "No response text found."
         st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})

-    if uploaded_files:
-        # Send images to the gemini-pro-vision model for image-based conversation
-        image_prompts = []
-        for uploaded_file in uploaded_files:
-            image = Image.open(uploaded_file).convert("RGB") # Ensure image is in RGB
-            image_base64 = get_image_base64(image)
-            image_prompt = {"role": "user", "parts": [{"mime_type": uploaded_file.type, "data": image_base64}]}
-            image_prompts.append(image_prompt)
-            st.session_state['chat_history'].append(image_prompt)
-
+    # Generate image response if images are uploaded
+    if image_prompts:
         model = genai.GenerativeModel(
             model_name='gemini-pro-vision',
             generation_config=generation_config,
@@ -71,18 +76,22 @@ def send_message():
         )
         response = model.generate_content(image_prompts)
         response_text = response.text if hasattr(response, "text") else "No response text found."
+        for prompt in image_prompts:
+            st.session_state['chat_history'].append(prompt) # Append images to history
         st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})

     # Clear the user input and generate a new key for the file uploader widget to reset it
     st.session_state.user_input = ''
-    st.session_state.uploaded_files = None
+    st.session_state.uploaded_files = []
     st.session_state.file_uploader_key = str(uuid.uuid4())

+# Function to clear conversation
 def clear_conversation():
-    st.session_state.chat_history = []
+    st.session_state['chat_history'] = []
+    st.session_state['file_uploader_key'] = str(uuid.uuid4())

 # Multiline text input for the user to send messages
-user_input = st.text_area("Enter your message here:", key="user_input")
+user_input = st.text_area("Enter your message here:", key="user_input", value="")

 # File uploader for images
 uploaded_files = st.file_uploader(
@@ -103,12 +112,9 @@ for entry in st.session_state['chat_history']:
     role = entry["role"]
     parts = entry["parts"][0]
     if 'text' in parts:
-        st.markdown(f"**{role.title()}**: {parts['text']}")
+        st.markdown(f"{role.title()}: {parts['text']}")
     elif 'data' in parts:
-        # Decode the base64 image data
-        base64_data = parts['data']
-        st.markdown(f"**{role.title()}**: (Image)")
-        st.image(base64_data, caption=f"Image from {role}", use_column_width=True)
+        st.markdown(f"{role.title()}: (Image)")

 # Ensure the file_uploader widget state is tied to the randomly generated key
 st.session_state.uploaded_files = uploaded_files
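
Note on the API-key change: the new line api_key = os.getenv('AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM') passes what appears to be the key itself as the environment-variable name, so os.getenv will normally return None, and the literal key still remains in the source. A minimal sketch of the conventional pattern follows; the variable name GOOGLE_API_KEY and the fail-fast check are assumptions for illustration, not part of this commit.

import os
import google.generativeai as genai

# Assumed variable name for illustration: export GOOGLE_API_KEY before starting the app.
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("GOOGLE_API_KEY environment variable is not set")
genai.configure(api_key=api_key)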
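
The send_message hunks also call get_image_base64(image), a helper defined elsewhere in app.py and not visible in this diff. A plausible sketch of such a helper, assuming the "data" part is expected to carry a base64-encoded JPEG string (the actual implementation in app.py may differ):

import base64
import io
from PIL import Image

def get_image_base64(image: Image.Image) -> str:
    # Serialize the PIL image to JPEG bytes in memory, then base64-encode the result.
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")

If the helper re-encodes to JPEG as sketched here, the mime_type taken from uploaded_file.type may not match the encoded bytes; that detail depends on the real implementation.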