import streamlit as st
import google.generativeai as genai

# API key (use your own; load it from st.secrets or an environment variable rather than hard-coding it)
genai.configure(api_key="YOUR_API_KEY")

# Model settings
generation_config = {
    "temperature": 0.9,
    "max_output_tokens": 2048,
}

safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]

# Instantiate the Gemini Pro model with the settings above
model = genai.GenerativeModel(
    model_name="gemini-pro",
    generation_config=generation_config,
    safety_settings=safety_settings,
)

# Chatbot interface
st.title("Gemini API Chatbot")

# Chat history is kept in session state as a list of {"role": ..., "parts": [...]} dicts,
# the content format accepted by model.start_chat(history=...)
chat_history = st.session_state.get("chat_history", [])

chat_container = st.container()

# Display previous messages
for message in chat_history:
    # Gemini uses the roles "user" and "model"; show "model" turns as "assistant"
    role = "assistant" if message["role"] == "model" else "user"
    text = message["parts"][0]
    chat_container.markdown(f"**{role}:** {text}")

# Input  
user_input = st.text_input("You")

if user_input:

    # Display the user message
    chat_container.markdown(f"**user:** {user_input}")

    # Get the response: seed a chat with the stored history, then send the new message
    with st.spinner("Thinking..."):
        convo = model.start_chat(history=chat_history)
        response = convo.send_message(user_input)

    # Extract the response text
    response_text = response.text

    # Display the response
    chat_container.markdown(f"**assistant:** {response_text}")

    # Append both turns to the history and persist it across reruns
    chat_history.append({"role": "user", "parts": [user_input]})
    chat_history.append({"role": "model", "parts": [response_text]})
    st.session_state["chat_history"] = chat_history
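
# To try this app locally (assuming the script is saved as app.py):
#   streamlit run app.py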