Spaces:
Sleeping
Sleeping
File size: 8,009 Bytes
d0ba0ce 944017e 02cc2be 72a2744 d0ba0ce 7c95914 d0ba0ce 7c95914 d0ba0ce 7c95914 44c0e78 668775b 44c0e78 72a2744 47f6195 7c95914 8d705f9 807533f 5c42a74 8d705f9 fb0ea79 abc43f8 fb0ea79 807533f fb0ea79 b41d273 561f7a8 32f029a fb0ea79 d0ba0ce 894c71a 266d4b2 ae22dfd d0ba0ce a98948f 72a2744 2df9243 7c95914 72a2744 7c95914 f9dbffb 7c95914 72a2744 2b04423 d0ba0ce 2b04423 7c95914 72a2744 dcd9708 72a2744 0d3004a 72a2744 5765602 72a2744 d0ba0ce 72a2744 d0ba0ce 72a2744 d0ba0ce abd1f1b 72a2744 0da8351 d0ba0ce |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 |
import streamlit as st
from PIL import Image
import time
import streamlit_analytics
from dotenv import load_dotenv
import pickle
from huggingface_hub import Repository
from PyPDF2 import PdfReader
from streamlit_extras.add_vertical_space import add_vertical_space
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback
import os
# Step 1: clone the private dataset repository that holds the PDF knowledge base.
repo = Repository(
local_dir="Private_Book",  # local directory to clone the repository into
repo_type="dataset",  # this is a dataset repository, not a model repo
clone_from="Anne31415/Private_Book",  # source repository on the Hugging Face Hub
token=os.environ["HUB_TOKEN"]  # auth token; os.environ[...] raises KeyError if unset
)
repo.git_pull()  # pull the latest changes (if any)
# Step 2: path to the PDF file inside the freshly cloned repository.
pdf_path = "Private_Book/KOMBI_all2.pdf"  # Replace with your PDF file path
# Sidebar: branding plus a short capability overview of the chat bot.
with st.sidebar:
    st.title('BinDoc GmbH')
    st.markdown("Experience revolutionary interaction with BinDocs Chat App, leveraging state-of-the-art AI technology.")
    add_vertical_space(1)  # vertical gap below the intro text
    # Capability overview; HTML line breaks are rendered via unsafe_allow_html.
    st.markdown("""
Hello! I’m here to assist you with:<br><br>
📘 **Glossary Inquiries:**<br>
I can clarify terms like "DiGA", "AOP", or "BfArM", providing clear and concise explanations to help you understand our content better.<br><br>
🆘 **Help Page Navigation:**<br>
Ask me if you forgot your password or want to know more about topics related to the platform.<br><br>
📰 **Latest Whitepapers Insights:**<br>
Curious about our recent publications? Feel free to ask about our latest whitepapers!<br><br>
""", unsafe_allow_html=True)
    add_vertical_space(1)  # vertical gap above the footer line
    st.write('Made with ❤️ by BinDoc GmbH')
# Retrieve the OpenAI API key from the environment (not from st.secrets).
# NOTE(review): `api_key` is never referenced anywhere below — the OpenAI
# client presumably reads OPENAI_API_KEY from the environment on its own;
# confirm before removing this line.
api_key = os.getenv("OPENAI_API_KEY")
# Cached loader; persist="disk" keeps the Streamlit cache across sessions.
@st.cache_data(persist="disk")
def load_vector_store(file_path, store_name, force_reload=False):
    """Return the FAISS vector store for *file_path*, building it on demand.

    The store is pickled to "<store_name>.pkl". An existing pickle is reused
    unless *force_reload* is True or the pickle file is missing; otherwise the
    PDF text is re-chunked, re-embedded, and the new store is written to disk.
    """
    pickle_path = f"{store_name}.pkl"

    # Fast path: reuse the previously persisted store.
    if not force_reload and os.path.exists(pickle_path):
        with open(pickle_path, "rb") as f:
            return pickle.load(f)

    # Rebuild: split the PDF text into overlapping chunks, embed each chunk,
    # and index the embeddings with FAISS.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
    chunks = splitter.split_text(text=load_pdf_text(file_path))
    store = FAISS.from_texts(chunks, embedding=OpenAIEmbeddings())
    with open(pickle_path, "wb") as f:
        pickle.dump(store, f)
    return store
def load_pdf_text(file_path):
    """Extract and concatenate the text of every page of the PDF at *file_path*.

    Pages where extraction fails (extract_text() returns None) contribute an
    empty string instead of raising.
    """
    pdf_reader = PdfReader(file_path)
    # str.join builds the result in one pass, avoiding the quadratic cost of
    # repeated `text += ...` string concatenation on large PDFs.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
def load_chatbot():
    """Build the question-answering chain backed by the default OpenAI LLM."""
    llm = OpenAI()
    return load_qa_chain(llm=llm, chain_type="stuff")
def main():
    """Run the BinDocs chat UI.

    Loads the vector store for the module-level `pdf_path`, accepts a question
    (typed, or chosen via a suggested-question button), runs the QA chain
    against the top-3 similar chunks, and renders the conversation history.
    """
    try:
        # Hide Streamlit's default menu and footer chrome.
        hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
        st.markdown(hide_streamlit_style, unsafe_allow_html=True)

        # Main content
        st.title("Welcome to BinDocs ChatBot! 🤖")

        # Start tracking user interactions
        with streamlit_analytics.track():
            if not os.path.exists(pdf_path):
                st.error("File not found. Please check the file path.")
                return

            VectorStore = load_vector_store(pdf_path, "my_vector_store", force_reload=False)

            if "chat_history" not in st.session_state:
                st.session_state['chat_history'] = []

            display_chat_history(st.session_state['chat_history'])

            # Spacer pushing the input area toward the bottom of the page.
            st.write("<!-- Start Spacer -->", unsafe_allow_html=True)
            st.write("<div style='flex: 1;'></div>", unsafe_allow_html=True)
            st.write("<!-- End Spacer -->", unsafe_allow_html=True)

            new_messages_placeholder = st.empty()

            query = st.text_input("Ask questions about your PDF file (in any preferred language):")

            # Suggested questions; clicking a button overrides any typed query.
            # (Replaces six copy-pasted if-blocks with one data-driven loop.)
            suggested_questions = (
                "Was genau ist ein Belegarzt?",
                "Wofür wird die Alpha-ID verwendet?",
                "Was sind die Vorteile des ambulanten Operierens?",
                "Was kann ich mit dem Prognose-Analyse-Tool machen?",
                "Was sagt mir die Farbe der Balken der Bevölkerungsentwicklung?",
                "Ich habe mein Meta-Password vergessen, wie kann ich es zurücksetzen?",
            )
            for question in suggested_questions:
                if st.button(question):
                    query = question

            if query:
                st.session_state['chat_history'].append(("User", query, "new"))

                # Time the full retrieve-and-answer round trip.
                start_time = time.time()
                with st.spinner('Bot is thinking...'):
                    chain = load_chatbot()
                    docs = VectorStore.similarity_search(query=query, k=3)
                    # Callback context tracks OpenAI token usage/cost; the
                    # handle itself was never read, so it is not bound.
                    with get_openai_callback():
                        response = chain.run(input_documents=docs, question=query)
                duration = time.time() - start_time
                st.text(f"Response time: {duration:.2f} seconds")

                st.session_state['chat_history'].append(("Bot", response, "new"))

                # Render the just-added user/bot pair at the bottom.
                # NOTE: the original per-message ternary produced "#ffeecf" in
                # every branch, so a single constant is used (same behaviour).
                background_color = "#ffeecf"
                for sender, message, _ in st.session_state['chat_history'][-2:]:
                    new_messages_placeholder.markdown(
                        f"<div style='background-color: {background_color}; padding: 10px; border-radius: 10px; margin: 10px;'>{sender}: {message}</div>",
                        unsafe_allow_html=True,
                    )
                # (The original `query = ""` was removed: reassigning the local
                # does not clear a Streamlit text_input, so it was a no-op.)

            # Mark all messages as old after displaying.
            st.session_state['chat_history'] = [
                (sender, msg, "old") for sender, msg, _ in st.session_state['chat_history']
            ]
    except Exception as e:
        # Top-level UI boundary: surface the error to the user instead of
        # crashing the app. Optionally log to an error-tracking service.
        st.error(f"Upsi, an unexpected error occurred: {e}")
def display_chat_history(chat_history):
    """Render each (sender, message, status) triple as a styled chat bubble."""
    # NOTE: the original status/sender ternary evaluated to "#ffeecf" in every
    # branch, so it is collapsed to a constant (behaviour unchanged).
    background_color = "#ffeecf"
    for sender, message, _status in chat_history:
        st.markdown(
            f"<div style='background-color: {background_color}; padding: 10px; border-radius: 10px; margin: 10px;'>{sender}: {message}</div>",
            unsafe_allow_html=True,
        )
# Standard script entry point: run the app when executed directly.
if __name__ == "__main__":
    main()