import streamlit as st
import google.generativeai as genai

# Configure API key and model name
api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNME"
model_name = "gemini-pro"
genai.configure(api_key=api_key)

# Create model object
model = genai.GenerativeModel(model_name)
# Create chatbot interface
st.title("Gemini API Chatbot")
# Get chat history from session state
chat_history = st.session_state.get("chat_history", [])
# Get user input from text box
user_input = st.text_input("You")
# Check if user input is not empty
if user_input:
    # Create user message as a role/parts dict (the format generate_content accepts)
    user_message = {"role": "user", "parts": [user_input]}
    # Add user message to chat history
    chat_history.append(user_message)
    # Display user message with markdown
    st.markdown(f"**You:** {user_input}")
    # Get model response for the full conversation so far
    with st.spinner("Thinking..."):
        response = model.generate_content(chat_history)
    # Get response text from response object
    response_text = response.text
    # Add response message to chat history (Gemini uses the "model" role for replies)
    chat_history.append({"role": "model", "parts": [response_text]})
    # Display response message with markdown
    st.markdown(f"**Gemini Bot:** {response_text}")
    # Update session state with chat history
    st.session_state["chat_history"] = chat_history
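# Optional sketch, not part of the original app (assumptions: this file is saved as
# app.py and a GOOGLE_API_KEY entry exists in .streamlit/secrets.toml). The app can
# be started with `streamlit run app.py`, and the hardcoded key above could instead
# be read from Streamlit secrets:
#   api_key = st.secrets["GOOGLE_API_KEY"]  # hypothetical secret name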