import sqlite3

import google.generativeai as genai
import streamlit as st

# Database setup
conn = sqlite3.connect('chat_history.db') 
c = conn.cursor() 

c.execute('''
    CREATE TABLE IF NOT EXISTS history
    (role TEXT, message TEXT)
''')
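
# Streamlit re-executes this entire script on every interaction, so the
# connection above is reopened each run and the CREATE TABLE statement must
# stay idempotent (hence IF NOT EXISTS).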

# Generative AI setup
# Read the key from Streamlit secrets (the "GOOGLE_API_KEY" name is arbitrary);
# a real API key should never be hardcoded in source.
api_key = st.secrets["GOOGLE_API_KEY"]
genai.configure(api_key=api_key)

generation_config = {
    "temperature": 0.9,        # higher temperature -> more varied replies
    "max_output_tokens": 500   # hard cap on response length
}

# An empty list keeps the API's default safety filters in place.
safety_settings = []

# Streamlit UI
st.title("Chatbot") 

# Conversation state survives Streamlit reruns via session_state.
chat_history = st.session_state.get("chat_history", [])

# Render the conversation so far.
for message in chat_history:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")

user_input = st.text_input("Your message:")

if user_input:
    # User turns always carry the "user" role; each entry uses the Gemini
    # content format: {"role": ..., "parts": [{"text": ...}]}.
    chat_history.append({"role": "user", "parts": [{"text": user_input}]})

    model = genai.GenerativeModel(
        model_name="gemini-pro",
        generation_config=generation_config,
        safety_settings=safety_settings
    )

    # Send the full history so the model sees the conversation context.
    response = model.generate_content(chat_history)
    chat_history.append({"role": "model", "parts": [{"text": response.text}]})

    st.session_state["chat_history"] = chat_history

    # Persist only the new exchange; re-inserting the whole history on every
    # rerun would duplicate rows in the database.
    for message in chat_history[-2:]:
        c.execute("INSERT INTO history VALUES (?, ?)",
                  (message["role"], message["parts"][0]["text"]))
    conn.commit()

    # Show just the new exchange; earlier turns are already on screen.
    for message in chat_history[-2:]:
        r, t = message["role"], message["parts"][0]["text"]
        st.markdown(f"**{r.title()}:** {t}")

# Create a placeholder for the history display
history_placeholder = st.empty()

# Use the key argument so each button gets a distinct widget ID
if st.button("Display History", key="display"):
    c.execute("SELECT * FROM history")
    rows = c.fetchall()

    # st.empty() holds a single element, so writing row-by-row would leave only
    # the last row visible; join everything into one markdown call instead.
    history_placeholder.markdown(
        "\n\n".join(f"**{role.title()}:** {msg}" for role, msg in rows)
    )

# Clearing the placeholder removes any displayed history; on_click registers
# the callback to run at the start of the next rerun, so the button body
# itself needs no work.
def clear_history():
    history_placeholder.empty()

if st.button("Reset History", key="reset", on_click=clear_history):
    pass


conn.close()
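
# The image section below never touches the database, so the connection can be
# closed here; the next rerun reopens it at the top of the script.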

# Separate section for image uploading
st.title("Image Description Generator")

uploaded_file = st.file_uploader("Upload an image here", type=["png", "jpg", "jpeg"])
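
# st.file_uploader returns an UploadedFile (or None); .type exposes the MIME
# type and .read() the raw bytes, which the vision request below expects.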

# Text input for asking questions about the image
image_question = st.text_input("Ask something about the image:")

if uploaded_file and image_question:
    # Inline image payload for the vision model: MIME type plus raw bytes.
    image_parts = [
        {
            "mime_type": uploaded_file.type,
            "data": uploaded_file.read()
        },
    ]

    # The prompt interleaves the question with the image.
    prompt_parts = [
        image_question,
        image_parts[0],
    ]

    model = genai.GenerativeModel(
        model_name="gemini-pro-vision",
        generation_config=generation_config,
        safety_settings=safety_settings
    )

    response = model.generate_content(prompt_parts)
    st.markdown(f"**Model's answer:** {response.text}")
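
# A minimal sketch of how to run this locally (assuming the file is saved as
# app.py and the key lives in .streamlit/secrets.toml, matching the lookup
# near the top):
#
#   $ streamlit run app.py
#
# .streamlit/secrets.toml:
#   GOOGLE_API_KEY = "your-key-here"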