"""Streamlit chatbot backed by the Gemini API, with chat history stored in
SQLite and a separate image-description section. Run with: streamlit run <this file>."""
import sqlite3

import google.generativeai as genai
import streamlit as st

# Database setup
conn = sqlite3.connect('chat_history.db') 
c = conn.cursor() 

c.execute('''
          CREATE TABLE IF NOT EXISTS history  
          (role TEXT, message TEXT)  
          ''')

# Generative AI setup
api_key = "YOUR_API_KEY"  # Replace with your Gemini API key
genai.configure(api_key=api_key)

generation_config = {
    "temperature": 0.9,        # Higher values give more varied replies
    "max_output_tokens": 500,  # Cap the length of each reply
}

safety_settings = []  # No overrides; the API's default safety filters apply

# Streamlit UI
st.title("Chatbot")

chat_history = st.session_state.get("chat_history", [])

# Render the conversation stored so far in this session
for message in chat_history:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")

user_input = st.text_input("Your message")

# Optional image upload; used by the multimodal request sketched below
uploaded_file = st.file_uploader("Upload an image (optional)")

if user_input:
    # A typed message is always the user's turn; the model's reply is appended
    # right after it, so the history alternates user/model as the API expects.
    chat_history.append({"role": "user", "parts": [{"text": user_input}]})

    model = genai.GenerativeModel(
        model_name="gemini-pro",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )

    # Send the whole conversation so the model sees earlier turns as context
    response = model.generate_content(chat_history)
    response_text = response.text
    chat_history.append({"role": "model", "parts": [{"text": response_text}]})

    st.session_state["chat_history"] = chat_history

    # Show only the messages added in this turn; earlier ones were rendered above
    for message in chat_history[-2:]:
        r, t = message["role"], message["parts"][0]["text"]
        st.markdown(f"**{r.title()}:** {t}")

if st.button("Display History"): 
    c.execute("SELECT * FROM history") 
    rows = c.fetchall() 

    for row in rows: 
        st.markdown(f"**{row[0].title()}:** {row[1]}") 
        
# Save chat history to database
for message in chat_history: 
    c.execute("INSERT INTO history VALUES (?, ?)", 
            (message["role"], message["parts"][0]["text"])) 
    conn.commit() 

conn.close()
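
# Note: Streamlit reruns this whole script on every interaction, so the SQLite
# connection above is opened and closed on each run. A cached connection is one
# way to avoid that (a sketch; assumes a Streamlit version with st.cache_resource):
#
# @st.cache_resource
# def get_connection():
#     return sqlite3.connect("chat_history.db", check_same_thread=False)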

# Separate section for image uploading
st.title("Image Description Generator")

uploaded_file = st.file_uploader("Upload an image here", type=["png", "jpg", "jpeg"])

if uploaded_file:
    # Package the raw bytes and MIME type the way the multimodal model expects
    image_parts = [
        {
            "mime_type": uploaded_file.type,
            "data": uploaded_file.read(),
        },
    ]

    prompt_parts = [
        "Describe the image:",
        image_parts[0],
    ]

    model = genai.GenerativeModel(
        model_name="gemini-pro-vision",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )

    response = model.generate_content(prompt_parts)
    st.markdown(f"**Model's description:** {response.text}")