ziyadsuper2017 committed
Commit 8aae6cc · 1 Parent(s): 5e7a6b1

Update app.py
Files changed (1): app.py (+21, -26)
app.py CHANGED
@@ -3,20 +3,21 @@ from PIL import Image
 import io
 import base64
 import uuid
-import os
 
-# Assuming google.generativeai as genai is the correct import based on your description
+# Placeholder import - Replace with the actual import once you have the correct library
 import google.generativeai as genai
 
-#Configure the API key (should be set as an environment variable or secure storage in production)
-api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM" # Replace with your actual API key
+# Set up your API key securely, don't hardcode it in production
+api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
 genai.configure(api_key=api_key)
 
+# Placeholder for the generative model configuration
 generation_config = genai.GenerationConfig(
     temperature=0.9,
     max_output_tokens=3000
 )
 
+# Placeholder for safety settings
 safety_settings = []
 
 # Initialize session state for chat history and file uploader key
@@ -41,23 +42,11 @@ def send_message():
     user_input = st.session_state.user_input
     uploaded_files = st.session_state.uploaded_files
 
-    text_prompts = []
-    image_prompts = []
-
     # Process text input for multi-turn conversation
     if user_input:
-        text_prompts.append({"role": "user", "parts": [{"text": user_input}]})
-        st.session_state['chat_history'].append({"role": "user", "parts": [{"text": user_input}]})
-
-    # Process uploaded images for single-turn conversation
-    if uploaded_files:
-        for uploaded_file in uploaded_files:
-            image = Image.open(uploaded_file).convert("RGB") # Ensure image is in RGB
-            image_base64 = get_image_base64(image)
-            image_prompts.append({"role": "user", "parts": [{"mime_type": uploaded_file.type, "data": image_base64}]})
-
-    # Generate text response if text input is provided
-    if text_prompts:
+        text_prompt = {"role": "user", "parts": [{"text": user_input}]}
+        st.session_state['chat_history'].append(text_prompt)
+        # Actual API call for generating a text response from the Gemini Pro model
         model = genai.GenerativeModel(
             model_name='gemini-pro',
             generation_config=generation_config,
@@ -67,8 +56,15 @@ def send_message():
         response_text = response.text if hasattr(response, "text") else "No response text found."
         st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})
 
-    # Generate image response if images are uploaded
-    if image_prompts:
+    # Process uploaded images for single-turn conversation
+    if uploaded_files:
+        image_prompts = []
+        for uploaded_file in uploaded_files:
+            image = Image.open(uploaded_file).convert("RGB")
+            image_base64 = get_image_base64(image)
+            image_prompt = {"role": "user", "parts": [{"mime_type": uploaded_file.type, "data": image_base64}]}
+            image_prompts.append(image_prompt)
+        # Actual API call for generating a response from the Gemini Pro Vision model
         model = genai.GenerativeModel(
             model_name='gemini-pro-vision',
             generation_config=generation_config,
@@ -77,10 +73,10 @@ def send_message():
         response = model.generate_content(image_prompts)
         response_text = response.text if hasattr(response, "text") else "No response text found."
         for prompt in image_prompts:
-            st.session_state['chat_history'].append(prompt) # Append images to history
+            st.session_state['chat_history'].append(prompt)
         st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})
 
-    # Clear the user input and generate a new key for the file uploader widget to reset it
+    # Clear input and uploaded files after processing
     st.session_state.user_input = ''
     st.session_state.uploaded_files = []
    st.session_state.file_uploader_key = str(uuid.uuid4())
@@ -88,10 +84,9 @@
 # Function to clear conversation
 def clear_conversation():
     st.session_state['chat_history'] = []
-    st.session_state['file_uploader_key'] = str(uuid.uuid4())
 
-# Multiline text input for the user to send messages
-user_input = st.text_area("Enter your message here:", key="user_input", value="")
+# Input field for text messages
+user_input = st.text_area("Enter your message here:", key="user_input")
 
 # File uploader for images
 uploaded_files = st.file_uploader(
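
The new comment above genai.configure says the key should not be hardcoded in production, yet the literal key string is still committed. A minimal sketch of one way to follow that comment, assuming the key is exposed to the app as an environment variable named GOOGLE_API_KEY (a hypothetical name; Hugging Face Spaces surfaces repository secrets as environment variables):

import os
import streamlit as st
import google.generativeai as genai

# GOOGLE_API_KEY is an assumed secret/environment-variable name, not something defined in app.py.
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    st.error("GOOGLE_API_KEY is not set; configure it as a secret instead of hardcoding it.")
    st.stop()

genai.configure(api_key=api_key)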
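
The text branch stores user and model turns in st.session_state['chat_history'] using the google.generativeai role/parts format, but the generate_content call for the text model falls outside the visible hunks. A sketch of how that multi-turn call is typically made with this history shape, assuming the full history is what gets passed (generation_config and safety_settings repeated here only to keep the sketch self-contained):

import streamlit as st
import google.generativeai as genai

# Same settings as configured at the top of app.py.
generation_config = genai.GenerationConfig(temperature=0.9, max_output_tokens=3000)
safety_settings = []

model = genai.GenerativeModel(
    model_name='gemini-pro',
    generation_config=generation_config,
    safety_settings=safety_settings
)

# Passing the accumulated role/parts history lets the model see earlier turns;
# the latest user message was appended to chat_history just before this call.
response = model.generate_content(st.session_state['chat_history'])
response_text = response.text if hasattr(response, "text") else "No response text found."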
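
The image branch calls get_image_base64(image), which is defined elsewhere in app.py and not shown in these hunks. Since its result is sent as the "data" field of an inline part alongside uploaded_file.type, the helper presumably looks roughly like the sketch below; the JPEG default is an assumption, and the real helper may preserve the original format:

import base64
import io
from PIL import Image

def get_image_base64(image: Image.Image, fmt: str = "JPEG") -> str:
    # Serialize the PIL image to an in-memory buffer and return it base64-encoded,
    # matching the {"mime_type": ..., "data": ...} inline-part shape built in send_message().
    buffer = io.BytesIO()
    image.save(buffer, format=fmt)
    return base64.b64encode(buffer.getvalue()).decode("utf-8")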
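
send_message() ends by assigning a fresh uuid4 to st.session_state.file_uploader_key; that only clears previous uploads if the uploader widget is keyed on that value, so Streamlit rebuilds it on the next rerun. The st.file_uploader call is truncated in this diff, so the label and type filter below are placeholders; only the key wiring is the point being illustrated:

import uuid
import streamlit as st

if 'file_uploader_key' not in st.session_state:
    st.session_state['file_uploader_key'] = str(uuid.uuid4())

# Keying the widget on a session-state value means that assigning a new UUID in
# send_message() creates a brand-new uploader on the next rerun, discarding the
# previously selected files.
uploaded_files = st.file_uploader(
    "Upload images:",             # placeholder label
    type=["png", "jpg", "jpeg"],  # placeholder type filter
    accept_multiple_files=True,
    key=st.session_state['file_uploader_key'],
)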