File size: 2,494 Bytes
d1f86df
837873a
 
 
1d3466d
7a4fa7e
837873a
c1bab7b
1d3466d
 
837873a
 
2abea13
c748174
837873a
 
 
5b0a6ee
e21ddc9
2abea13
837873a
e21ddc9
 
837873a
 
0df9787
837873a
7a4fa7e
837873a
 
 
 
 
 
7a4fa7e
0df9787
f67d957
0df9787
72e65b5
4fa0fd6
 
72e65b5
4fa0fd6
 
 
 
 
 
 
 
 
 
 
 
837873a
 
 
 
 
 
 
 
0df9787
837873a
 
 
0df9787
837873a
0df9787
837873a
 
 
 
 
 
72e65b5
837873a
 
 
0df9787
837873a
0df9787
837873a
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import os
import sqlite3

import streamlit as st
from streamlit import file_uploader

import google.generativeai as genai

# --- Database setup -------------------------------------------------------
# Persistent SQLite store for the chat transcript; the file is created on
# first run if it does not already exist.
conn = sqlite3.connect('chat_history.db')
c = conn.cursor()

# One row per chat turn: who spoke (role) and what was said (message).
# CREATE TABLE is a no-op when the table already exists.
c.execute('''
          CREATE TABLE IF NOT EXISTS history  
          (role TEXT, message TEXT)
          ''')

# --- Generative AI setup --------------------------------------------------
# SECURITY: an API key was hard-coded here. Prefer the GOOGLE_API_KEY
# environment variable; the literal fallback preserves existing behavior for
# now, but this key is exposed in source control and should be revoked and
# the fallback removed.
api_key = os.environ.get(
    "GOOGLE_API_KEY",
    "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM",
)
genai.configure(api_key=api_key)

# Sampling parameters sent with every generate_content request.
generation_config = {
    "temperature": 0.9,         # fairly creative sampling
    "max_output_tokens": 3000,  # cap on response length
}

# Empty list -> the library's default safety thresholds apply.
safety_settings = []

# --- Streamlit UI ---------------------------------------------------------
st.title("Chatbot")

# Pull the transcript from session state; register it immediately so the
# same list object survives Streamlit reruns (the original only stored it
# inside the user-role branch, losing model-role appends on first run).
chat_history = st.session_state.get("chat_history", [])
st.session_state["chat_history"] = chat_history

# Turns alternate strictly: an even message count means the user speaks next.
if len(chat_history) % 2 == 0:
    role = "user"
else:
    role = "model"


def _part_text(part):
    """Best-effort readable text for one message part.

    Parts come in three shapes in this script: a bare string (text upload),
    {"text": ...} (model reply), or {"mime_type": ..., "data": ...} (image
    upload, which has no textual form).
    """
    if isinstance(part, str):
        return part
    if isinstance(part, dict) and "text" in part:
        return part["text"]
    return "[binary attachment]"


# Use file_uploader for text and image input
user_input = st.file_uploader("Upload a text or an image file here", type=["txt", "png", "jpg", "jpeg"])
if user_input:
    # Check the file type and create the prompt parts accordingly
    if user_input.type == "text/plain":
        prompt_parts = [user_input.getvalue().decode("utf-8")]
        model_name = "gemini-pro"  # Use the text-only model
    else:
        prompt_parts = [{
            "mime_type": user_input.type,
            "data": user_input.read(),
        }]
        model_name = "gemini-pro-vision"  # Use the multimodal model

    chat_history.append({"role": role, "parts": prompt_parts})
    if role == "user":
        # Model code
        model = genai.GenerativeModel(
            model_name=model_name,
            generation_config=generation_config,
            safety_settings=safety_settings,
        )

        response = model.generate_content(chat_history)
        chat_history.append({"role": "model", "parts": [{"text": response.text}]})

        st.session_state["chat_history"] = chat_history

# Render the transcript exactly once, after any new turn was appended.
# The original rendered it both before and after the upload handling
# (duplicating every line) and indexed parts[0]["text"] unconditionally,
# which raises TypeError for string parts and KeyError for image parts.
for message in chat_history:
    st.markdown(f"**{message['role'].title()}:** {_part_text(message['parts'][0])}")
# Optional: dump everything previously persisted to the database.
if st.button("Display History"):
    saved_rows = c.execute("SELECT * FROM history").fetchall()
    for saved_role, saved_text in saved_rows:
        st.markdown(f"**{saved_role.title()}:** {saved_text}")
        
# Save chat history to database — persist only the turns not yet written.
# The original re-inserted the ENTIRE transcript on every Streamlit rerun,
# duplicating rows without bound, and committed once per row.
_already_saved = st.session_state.get("history_rows_saved", 0)
for message in chat_history[_already_saved:]:
    part = message["parts"][0]
    if isinstance(part, str):
        text = part                      # plain text prompt
    elif isinstance(part, dict) and "text" in part:
        text = part["text"]              # model reply
    else:
        text = "[binary attachment]"     # image upload; raw bytes don't belong in a TEXT column
    c.execute("INSERT INTO history VALUES (?, ?)", (message["role"], text))
st.session_state["history_rows_saved"] = len(chat_history)
conn.commit()  # single commit for the whole batch

conn.close()