ziyadsuper2017 commited on
Commit
c748174
·
1 Parent(s): 6447954

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -70
app.py CHANGED
@@ -1,94 +1,108 @@
1
- import streamlit as st # This imports the streamlit module for creating the UI
2
- import google.generativeai as genai # This imports the google generative AI module for using the model
3
- import sqlite3 # This imports the sqlite3 module for working with the database
4
  from streamlit import file_uploader
5
 
6
  # Database setup
7
- conn = sqlite3.connect('chat_history.db') # This creates a connection to the chat_history.db file
8
- c = conn.cursor() # This creates a cursor object to execute SQL commands
9
 
10
  c.execute('''
11
  CREATE TABLE IF NOT EXISTS history
12
  (role TEXT, message TEXT)
13
- ''') # This creates a table named history with two columns: role and message
14
-
15
- # Generative AI setup
16
- api_key = "AIza****REDACTED****" # SECURITY NOTE: a live Google API key appeared here in the original commit; it is exposed in the repository history and must be revoked and rotated.
17
- genai.configure(api_key=api_key) # This configures the genai module with your API key
18
 
19
  generation_config = {
20
- "temperature": 0.9, # This is a parameter that controls the randomness of the generated text
21
- "max_output_tokens": 500 # This is a parameter that limits the maximum number of tokens in the generated text
22
  }
23
 
24
- safety_settings = [] # This is a list of safety settings that can filter out harmful or inappropriate content
25
 
26
  # Streamlit UI
27
- st.title("Chatbot") # This displays a title for the UI
28
 
29
- chat_history = st.session_state.get("chat_history", []) # This gets the chat history from the session state or creates an empty list
30
 
31
- if len(chat_history) % 2 == 0: # This checks if the chat history has an even number of messages
32
- role = "user" # This sets the role to user
33
  else:
34
- role = "model" # This sets the role to model
35
-
36
- for message in chat_history: # This loops through each message in the chat history
37
- r, t = message["role"], message["parts"][0]["text"] # This extracts the role and the text from the message
38
- st.markdown(f"**{r.title()}:** {t}") # This displays the role and the text in markdown format
39
-
40
- user_input = st.text_input("") # This creates a text input widget for the user
41
 
42
  # File uploader for images
43
- uploaded_file = st.file_uploader("Upload an image (optional)") # This creates a file uploader widget for the user to upload an image file
44
-
45
- if user_input: # This checks if the user has entered some text
46
- chat_history.append({"role": role, "parts": [{"text": user_input}]}) # This appends the user input to the chat history
47
-
48
- if role == "user": # This checks if the role is user
49
- # Check if an image is uploaded
50
- image_parts = [] # This creates an empty list for the image parts
51
- if uploaded_file: # This checks if the user has uploaded a file
52
- image_parts.append({ # This appends a dictionary with the image information to the image parts list
53
- "mime_type": uploaded_file.type, # This gets the mime type of the file
54
- "data": uploaded_file.read() # This reads the bytes of the file
55
- })
56
-
57
- # Choose the model name based on the image parts
58
- if image_parts: # This checks if the image parts list is not empty
59
- model_name = "gemini-pro-vision" # This sets the model name to gemini-pro-vision
60
- else:
61
- model_name = "gemini-pro" # This sets the model name to gemini-pro
62
-
63
- # Create the generative model object
64
  model = genai.GenerativeModel(
65
- model_name=model_name, # This passes the model name to the model object
66
- generation_config=generation_config, # This passes the generation config to the model object
67
- safety_settings=safety_settings # This passes the safety settings to the model object
68
  )
69
-
70
- # Generate response based on text and image
71
- response = model.generate_content(chat_history + image_parts) # This generates a response from the model based on the chat history and the image parts
72
- response_text = response.text # This gets the text of the response
73
- chat_history.append({"role": "model", "parts": [{"text": response_text}]}) # This appends the response text to the chat history
74
-
75
- st.session_state["chat_history"] = chat_history # This updates the session state with the chat history
76
-
77
- for message in chat_history: # This loops through each message in the chat history
78
- r, t = message["role"], message["parts"][0]["text"] # This extracts the role and the text from the message
79
- st.markdown(f"**{r.title()}:** {t}") # This displays the role and the text in markdown format
80
-
81
- if st.button("Display History"): # This creates a button widget for displaying the history
82
- c.execute("SELECT * FROM history") # This executes a SQL command to select all the rows from the history table
83
- rows = c.fetchall() # This fetches all the rows from the cursor object
84
-
85
- for row in rows: # This loops through each row in the rows list
86
- st.markdown(f"**{row[0].title()}:** {row[1]}") # This displays the role and the message in markdown format
87
 
88
  # Save chat history to database
89
- for message in chat_history: # This loops through each message in the chat history
90
  c.execute("INSERT INTO history VALUES (?, ?)",
91
- (message["role"], message["parts"][0]["text"])) # This executes a SQL command to insert the role and the message into the history table
92
- conn.commit() # This commits the changes to the database
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
- conn.close() # This closes the connection to the database
 
 
 
 
 
 
 
 
1
"""Streamlit chatbot backed by Google Generative AI with SQLite chat logging.

Two independent sections:
  1. a text chatbot ("gemini-pro") whose exchanges are persisted to
     chat_history.db, and
  2. an image-description tool ("gemini-pro-vision") for uploaded images.

Streamlit re-executes this whole script on every user interaction, so all
top-level setup (DB connection, model config) happens once per rerun.
"""
import os
import sqlite3

import streamlit as st
import google.generativeai as genai
from streamlit import file_uploader  # NOTE(review): unused alias — st.file_uploader is called directly below

# --- Database setup -------------------------------------------------------
# A fresh connection per rerun; closed at the end of the chat section.
conn = sqlite3.connect('chat_history.db')
c = conn.cursor()

c.execute('''
CREATE TABLE IF NOT EXISTS history
(role TEXT, message TEXT)
''')

# --- Generative AI setup --------------------------------------------------
# Prefer an environment variable over a key baked into source; falls back to
# the original placeholder so behavior is unchanged when the variable is unset.
api_key = os.environ.get("GOOGLE_API_KEY", "YOUR_API_KEY")
genai.configure(api_key=api_key)

generation_config = {
    "temperature": 0.9,        # sampling randomness
    "max_output_tokens": 500,  # hard cap on response length
}

safety_settings = []  # empty list -> service-default content filtering

# --- Chat UI --------------------------------------------------------------
st.title("Chatbot")

chat_history = st.session_state.get("chat_history", [])

user_input = st.text_input("")

# File uploader for images.
# NOTE(review): this widget is never consumed by the text-chat path below;
# image handling lives in the separate section at the bottom of the script.
uploaded_file = st.file_uploader("Upload an image (optional)")

if user_input:
    # Text typed by the person is always a "user" turn. (The original code
    # derived the role from the parity of len(chat_history), which mislabeled
    # the input as "model" whenever the history had odd length.)
    chat_history.append({"role": "user", "parts": [{"text": user_input}]})

    model = genai.GenerativeModel(
        model_name="gemini-pro",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )

    response = model.generate_content(chat_history)
    response_text = response.text
    chat_history.append({"role": "model", "parts": [{"text": response_text}]})

    st.session_state["chat_history"] = chat_history

    # Persist only this new exchange. Re-inserting the whole history on every
    # rerun (as the original did) duplicated every row in the table.
    c.executemany(
        "INSERT INTO history VALUES (?, ?)",
        [("user", user_input), ("model", response_text)],
    )
    conn.commit()

# Render the conversation exactly once per rerun. (The original printed it
# both before the input box and again after generating a response.)
for message in chat_history:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")

if st.button("Display History"):
    c.execute("SELECT * FROM history")
    for row in c.fetchall():
        # row = (role, message) per the CREATE TABLE above
        st.markdown(f"**{row[0].title()}:** {row[1]}")

conn.close()

# --- Separate section for image uploading ---------------------------------
st.title("Image Description Generator")

uploaded_file = st.file_uploader("Upload an image here", type=["png", "jpg", "jpeg"])

if uploaded_file:
    image_parts = [
        {
            "mime_type": uploaded_file.type,  # e.g. "image/png"
            "data": uploaded_file.read(),     # raw bytes of the upload
        },
    ]

    # Vision models take mixed text + image parts in one prompt list.
    prompt_parts = [
        "Describe the image:",
        image_parts[0],
    ]

    model = genai.GenerativeModel(
        model_name="gemini-pro-vision",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )

    response = model.generate_content(prompt_parts)
    st.markdown(f"**Model's description:** {response.text}")