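"""Streamlit chatbot built on the google-generativeai (Gemini) API.

The app keeps the running conversation in st.session_state, mirrors it
into a local SQLite database, and offers a second section that answers
questions about uploaded images via gemini-pro-vision.
"""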
import os
import sqlite3

import google.generativeai as genai
import streamlit as st

# Database setup: a single table of (role, message) rows
conn = sqlite3.connect('chat_history.db')
c = conn.cursor()

c.execute('''
          CREATE TABLE IF NOT EXISTS history
          (role TEXT, message TEXT)
          ''')

# Generative AI setup
api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"   
genai.configure(api_key=api_key)

generation_config = {
    "temperature": 0.9,         # fairly high temperature for varied replies
    "max_output_tokens": 3000   # upper bound on response length
}

safety_settings = []
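# Note: an empty list keeps the library's default safety filters. To
# adjust one explicitly you could pass entries such as
# {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"}
# (category and threshold names as documented for google-generativeai).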

# Streamlit UI
st.title("Chatbot")

chat_history = st.session_state.get("chat_history", []) 
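# Each history entry has the shape expected by generate_content():
#   {"role": "user" | "model", "parts": [{"text": "..."}]}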

# Gemini chat turns must alternate strictly between "user" and "model";
# after a completed exchange the history length is even, so new input
# counts as the user's turn.
if len(chat_history) % 2 == 0:
    role = "user"
else:
    role = "model"

# Use text_area for multiline input; Streamlit needs a non-empty label,
# and current releases enforce a minimum height of 68 px (the original
# height=5 is rejected)
user_input = st.text_area("Your message", height=100)
# text_area keeps its value across Streamlit reruns, so skip input that
# matches the most recent user turn to avoid re-sending it
last_user_text = (
    chat_history[-2]["parts"][0]["text"] if len(chat_history) >= 2 else None
)
if user_input and user_input != last_user_text:
    chat_history.append({"role": role, "parts": [{"text": user_input}]})
    if role == "user":

        # Send the full alternating history to the text model
        model = genai.GenerativeModel(
            model_name="gemini-pro",
            generation_config=generation_config,
            safety_settings=safety_settings
        )

        response = model.generate_content(chat_history)
        response_text = response.text
        chat_history.append({"role": "model", "parts": [{"text": response_text}]})

    # Persist the updated history whichever branch ran
    st.session_state["chat_history"] = chat_history
        
# Render the conversation once, after any new exchange has been appended
for message in chat_history:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")
if st.button("Display History"): 
    c.execute("SELECT * FROM history") 
    rows = c.fetchall() 

    for row in rows: 
        st.markdown(f"**{row[0].title()}:** {row[1]}") 
        
# Save chat history to the database. Only messages added since the last
# run are inserted, so Streamlit reruns don't write duplicate rows.
saved = st.session_state.get("saved_count", 0)
for message in chat_history[saved:]:
    c.execute("INSERT INTO history VALUES (?, ?)",
              (message["role"], message["parts"][0]["text"]))
conn.commit()
st.session_state["saved_count"] = len(chat_history)

conn.close()

# Separate section for image uploading
st.title("Image Description Generator")

# Change the file_uploader to accept multiple files
uploaded_files = st.file_uploader("Upload one or more images here", type=["png", "jpg", "jpeg"], accept_multiple_files=True)

# Text input for asking questions about the images
image_question = st.text_input("Ask something about the images:")

# Loop through the uploaded files and display them
for uploaded_file in uploaded_files:
    # Display the image
    st.image(uploaded_file)

    # Check if the user has entered a question
    if image_question:
        # getvalue() is used instead of read() so the full buffer is
        # returned even if the file's read position was already advanced
        # (st.image above may have consumed it)
        image_parts = [
            {
                "mime_type": uploaded_file.type,
                "data": uploaded_file.getvalue()
            },
        ]

        prompt_parts = [
            image_question,
            image_parts[0],
        ]

        model = genai.GenerativeModel(
            model_name="gemini-pro-vision", 
            generation_config=generation_config, 
            safety_settings=safety_settings 
        )
        
        response = model.generate_content(prompt_parts)
        st.markdown(f"**Model's answer for {uploaded_file.name}:** {response.text}")
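
# To try the app locally (assuming this file is saved as app.py and a
# valid key is exported as GOOGLE_API_KEY):
#   streamlit run app.py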