Spaces:
Runtime error
Commit f473a48 · Update app.py
Parent(s): dbe3519
app.py
CHANGED
@@ -1,96 +1,92 @@
+# Import modules
 import streamlit as st
-import pandas as pd
-import numpy as np
 import google.generativeai as genai

-# API key
-genai.configure(api_key="
-
-# Model settings
-generation_config = {
-    "temperature": 0.9,
-    "max_output_tokens": 2048
-}
-
-safety_settings = [
-    {
-        "category": "HARM_CATEGORY_HARASSMENT",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    },
-    {
-        "category": "HARM_CATEGORY_HATE_SPEECH",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    },
-    {
-        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    },
-    {
-        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
-    }
-]
+# Configure API key
+genai.configure(api_key="YOUR_API_KEY_HERE")

+# Create model object
 model = genai.GenerativeModel(
-    model_name="gemini-pro",
-    generation_config=generation_config,
-    safety_settings=safety_settings
+    model_name="gemini-pro", # Specify model name
+    generation_config={
+        "temperature": 0.9, # Set generation temperature
+        "max_output_tokens": 2048 # Set maximum output tokens
+    },
+    safety_settings=[ # Set safety settings
+        {
+            "category": "HARM_CATEGORY_HARASSMENT",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        },
+        {
+            "category": "HARM_CATEGORY_HATE_SPEECH",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        },
+        {
+            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        },
+        {
+            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        }
+    ]
 )

-#
-st.title("Gemini API Chatbot")
+# Create chatbot interface
+st.title("Gemini API Chatbot") # Set title

-
+# Get chat history from session state
+chat_history = st.session_state.get("chat_history", [])

+# Create container for chat messages
 chat_container = st.container()

+# Loop over chat history and display messages
 for message in chat_history:
-    #
-
-
-
-
-    role = message.role
-    text = message.content
-
+    # Get message role and text
+    role = message.role
+    text = message.parts[0].text
+
+    # Display message with markdown
     chat_container.markdown(f"**{role}:** {text}")

-#
+# Get user input from text box
 user_input = st.text_input("You")

+# Check if user input is not empty
 if user_input:

-    # Create message
+    # Create user message object
     user_message = genai.GenerativeContent(
-
-
+        parts=[genai.Part(text=user_input)], # Wrap text in a list
+        role="user" # Set role to user
     )

-    #
+    # Add user message to chat history
     chat_history.append(user_message)

-    # Display message
+    # Display user message with markdown
     chat_container.markdown(f"**user:** {user_input}")

-    # Get response
+    # Get model response with start_chat method
     with st.spinner("Thinking..."):
         convo = model.start_chat(chat_history)
         response = convo.last

-    #
+    # Get response text from response object
     response_text = response.parts[0].text
-
-    # Create response message
-
-
-
+
+    # Create response message object
+    response_message = genai.GenerativeContent(
+        parts=[genai.Part(text=response_text)], # Wrap text in a list
+        role="assistant" # Set role to assistant
     )

-    #
-    chat_history.append(
+    # Add response message to chat history
+    chat_history.append(response_message)

-    # Display response
+    # Display response message with markdown
     chat_container.markdown(f"**assistant:** {response_text}")
-
-    # Update session state
-    st.session_state["chat_history"] = chat_history
+
+    # Update session state with chat history
+    st.session_state["chat_history"] = chat_history
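Note: the Space status above reads "Runtime error". The committed code calls genai.GenerativeContent and genai.Part, which do not appear in the published google-generativeai package, and it reads convo.last before any message has been sent. The listing below is a minimal sketch of the same chat loop written only against documented SDK calls (GenerativeModel, start_chat(history=...), send_message); the dict-based history format, the "model" role label, and the YOUR_API_KEY_HERE placeholder are assumptions for illustration, not part of the commit.

# Sketch only: the same chat loop reworked around documented google-generativeai calls.
import streamlit as st
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY_HERE")  # placeholder, not a real key

model = genai.GenerativeModel(model_name="gemini-pro")

st.title("Gemini API Chatbot")

# Keep history as plain dicts, a content format start_chat() accepts.
chat_history = st.session_state.get("chat_history", [])

chat_container = st.container()
for message in chat_history:
    chat_container.markdown(f"**{message['role']}:** {message['parts'][0]}")

user_input = st.text_input("You")
if user_input:
    chat_container.markdown(f"**user:** {user_input}")

    with st.spinner("Thinking..."):
        # start_chat() takes the earlier turns; send_message() runs the new one.
        convo = model.start_chat(history=chat_history)
        response = convo.send_message(user_input)
    response_text = response.text

    chat_container.markdown(f"**model:** {response_text}")

    # The Gemini API expects the roles "user" and "model".
    chat_history.append({"role": "user", "parts": [user_input]})
    chat_history.append({"role": "model", "parts": [response_text]})
    st.session_state["chat_history"] = chat_history

The generation_config and safety_settings arguments from the commit would pass through to GenerativeModel unchanged; they are omitted here only to keep the sketch short.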