ziyadsuper2017 committed
Commit 40e8df5 · 1 Parent(s): 8aae6cc

Update app.py

Files changed (1)
  1. app.py +34 -32
app.py CHANGED
@@ -4,20 +4,18 @@ import io
 import base64
 import uuid
 
-# Placeholder import - Replace with the actual import once you have the correct library
+# Assuming google.generativeai is the correct import based on your description
 import google.generativeai as genai
 
-# Set up your API key securely, don't hardcode it in production
-api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
+# Configure the API key (should be set as an environment variable or secure storage in production)
+api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"  # Replace with your actual API key
 genai.configure(api_key=api_key)
 
-# Placeholder for the generative model configuration
 generation_config = genai.GenerationConfig(
     temperature=0.9,
     max_output_tokens=3000
 )
 
-# Placeholder for safety settings
 safety_settings = []
 
 # Initialize session state for chat history and file uploader key
@@ -37,6 +35,11 @@ def get_image_base64(image):
     img_str = base64.b64encode(buffered.getvalue()).decode()
     return img_str
 
+# Function to clear conversation
+def clear_conversation():
+    st.session_state['chat_history'] = []
+    st.session_state['file_uploader_key'] = str(uuid.uuid4())
+
 # Function to send message and clear input
 def send_message():
     user_input = st.session_state.user_input
@@ -44,48 +47,47 @@ def send_message():
 
     # Process text input for multi-turn conversation
     if user_input:
-        text_prompt = {"role": "user", "parts": [{"text": user_input}]}
-        st.session_state['chat_history'].append(text_prompt)
-        # Actual API call for generating a text response from the Gemini Pro model
-        model = genai.GenerativeModel(
-            model_name='gemini-pro',
-            generation_config=generation_config,
-            safety_settings=safety_settings
-        )
-        response = model.generate_content(st.session_state['chat_history'])
-        response_text = response.text if hasattr(response, "text") else "No response text found."
-        st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})
+        st.session_state['chat_history'].append({"role": "user", "parts": [{"text": user_input}]})
 
-    # Process uploaded images for single-turn conversation
+    # Check if images are uploaded
    if uploaded_files:
-        image_prompts = []
-        for uploaded_file in uploaded_files:
-            image = Image.open(uploaded_file).convert("RGB")
-            image_base64 = get_image_base64(image)
-            image_prompt = {"role": "user", "parts": [{"mime_type": uploaded_file.type, "data": image_base64}]}
-            image_prompts.append(image_prompt)
-        # Actual API call for generating a response from the Gemini Pro Vision model
-        model = genai.GenerativeModel(
+        # Prepare image prompts for single-turn conversation
+        image_prompts = [{
+            "role": "user",
+            "parts": [{"mime_type": uploaded_file.type, "data": get_image_base64(Image.open(uploaded_file))}]
+        } for uploaded_file in uploaded_files]
+
+        # Use Gemini Pro Vision model for image-based interaction
+        vision_model = genai.GenerativeModel(
             model_name='gemini-pro-vision',
             generation_config=generation_config,
             safety_settings=safety_settings
         )
-        response = model.generate_content(image_prompts)
+        response = vision_model.generate_content(image_prompts)
         response_text = response.text if hasattr(response, "text") else "No response text found."
+
+        # Append images and response to chat history
        for prompt in image_prompts:
             st.session_state['chat_history'].append(prompt)
         st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})
+
+    # If no images are uploaded, use Gemini Pro model for text-based interaction
+    elif user_input:
+        text_model = genai.GenerativeModel(
+            model_name='gemini-pro',
+            generation_config=generation_config,
+            safety_settings=safety_settings
+        )
+        response = text_model.generate_content(st.session_state['chat_history'])
+        response_text = response.text if hasattr(response, "text") else "No response text found."
+        st.session_state['chat_history'].append({"role": "model", "parts": [{"text": response_text}]})
 
-    # Clear input and uploaded files after processing
+    # Clear the user input and generate a new key for the file uploader widget to reset it
     st.session_state.user_input = ''
     st.session_state.uploaded_files = []
     st.session_state.file_uploader_key = str(uuid.uuid4())
 
-# Function to clear conversation
-def clear_conversation():
-    st.session_state['chat_history'] = []
-
-# Input field for text messages
+# Multiline text input for the user to send messages
 user_input = st.text_area("Enter your message here:", key="user_input")
 
 # File uploader for images
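
Note on the key handling in this revision: the updated comment says the API key should come from an environment variable or secure storage in production, yet the value is still hardcoded in app.py. A minimal sketch of the environment-variable approach, assuming the key is exported under an illustrative name GOOGLE_API_KEY (not something this commit defines):

import os

import google.generativeai as genai

# Read the key from the environment instead of embedding it in app.py.
# "GOOGLE_API_KEY" is an assumed variable name for this sketch.
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    raise RuntimeError("Set the GOOGLE_API_KEY environment variable before starting the app.")

genai.configure(api_key=api_key)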