# NOTE(review): removed copy/paste residue that preceded the code (a "File
# size" header, GitHub commit-hash gutter, and a 1-164 line-number column).
# It was not part of the program and made the module unimportable.
import streamlit as st
from dotenv import load_dotenv
import pickle
from huggingface_hub import Repository
from PyPDF2 import PdfReader
from streamlit_extras.add_vertical_space import add_vertical_space
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback
import os

# Step 1: Clone the Dataset Repository
# NOTE(review): this runs at import time — a missing HUB_TOKEN raises
# KeyError and any network/auth failure crashes the app before the UI
# renders. Consider os.getenv() plus an st.error() fallback.
repo = Repository(
    local_dir="Private_Book",  # Local directory to clone the repository
    repo_type="dataset",  # Specify that this is a dataset repository
    
    clone_from="Anne31415/Private_Book",  # Replace with your repository URL
    
    token=os.environ["HUB_TOKEN"]  # Use the secret token to authenticate
)
repo.git_pull()  # Pull the latest changes (if any)

# Step 2: Load the PDF File
# Path inside the cloned dataset repo; consumed by main() below.
pdf_file_path = "Private_Book/Glossar_HELP_DESK_combi.pdf"  # Replace with your PDF file path

# Sidebar content: branding, capability blurb, and an informational topic
# picker. Rendered on every Streamlit rerun.
with st.sidebar:
    st.title('BinDoc GmbH')
    st.markdown("Experience revolutionary interaction with BinDocs Chat App, leveraging state-of-the-art AI technology.")
    
    add_vertical_space(2)  # Adjust as per the desired spacing
    
    st.markdown("""
    Hello! I’m here to assist you with:<br><br>
    📘 **Glossary Inquiries:**<br>
    I can clarify terms like "DiGA", "AOP", or "BfArM", providing clear and concise explanations to help you understand our content better.<br><br>
    🆘 **Help Page Navigation:**<br>
    Ask me if you forgot your password or want to know more about topics related to the platform.
    """, unsafe_allow_html=True)
    
    add_vertical_space(3)  # Adjust as per the desired spacing
    
    # NOTE(review): `option` is display-only — its value is never read
    # elsewhere; the selectbox just surfaces example topics to the user.
    option = st.selectbox(
        'Want to see more Glossary Topics to ask about?',
        ('Basisfallwert', 'Cash Flow', 'Arzneimittelgesetz (AMG)')
    )

    st.write('Made with ❤️ by BinDoc GmbH')
    
    # NOTE(review): `api_key` is assigned but never used in this file —
    # OpenAIEmbeddings/OpenAI presumably read OPENAI_API_KEY from the
    # environment directly. Verify before removing.
    api_key = os.getenv("OPENAI_API_KEY")
    # Retrieve the API key from st.secrets

def load_pdf(file_path):
    """Extract the text of the PDF at *file_path* and return a FAISS store.

    The vector store is cached on disk as "<basename>.pkl" in the working
    directory, so OpenAI embeddings are computed (and billed) only once
    per PDF; subsequent calls load the pickled store.

    Args:
        file_path: Path to the PDF file to index.

    Returns:
        A FAISS vector store over ~1000-character chunks (200 overlap).
    """
    pdf_reader = PdfReader(file_path)
    # BUG FIX: extract_text() may return None for pages with no extractable
    # text (e.g. scanned images); coalesce to "" so joining never crashes.
    text = "".join(page.extract_text() or "" for page in pdf_reader.pages)

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
    chunks = text_splitter.split_text(text=text)

    store_name, _ = os.path.splitext(os.path.basename(file_path))
    cache_path = f"{store_name}.pkl"

    if os.path.exists(cache_path):
        # SECURITY NOTE: pickle.load executes arbitrary code from the file.
        # Acceptable only because this cache is produced by this app itself;
        # never point it at untrusted input.
        with open(cache_path, "rb") as f:
            VectorStore = pickle.load(f)
    else:
        embeddings = OpenAIEmbeddings()
        VectorStore = FAISS.from_texts(chunks, embedding=embeddings)
        with open(cache_path, "wb") as f:
            pickle.dump(VectorStore, f)

    return VectorStore



def load_chatbot():
    """Create the question-answering chain used to answer user queries.

    Uses the "stuff" chain type, which stuffs all retrieved documents
    into a single prompt for the OpenAI LLM.
    """
    llm = OpenAI()
    return load_qa_chain(llm=llm, chain_type="stuff")

def main():
    """Streamlit entry point: render the chat UI and answer PDF questions.

    Flow per rerun: show prior history, collect a query (typed or via a
    preset button), retrieve the top-3 similar chunks from the PDF vector
    store, run the QA chain, append both turns to session state, and mark
    all messages "old" so the next rerun styles them accordingly.
    """
    # Main content
    st.title("Welcome to BinDocs ChatBot! 🤖")

    # Directly specifying the path to the PDF file (module-level constant).
    pdf_path = pdf_file_path
    if not os.path.exists(pdf_path):
        st.error("File not found. Please check the file path.")
        return

    if "chat_history" not in st.session_state:
        st.session_state['chat_history'] = []

    display_chat_history(st.session_state['chat_history'])

    st.write("<!-- Start Spacer -->", unsafe_allow_html=True)
    st.write("<div style='flex: 1;'></div>", unsafe_allow_html=True)
    st.write("<!-- End Spacer -->", unsafe_allow_html=True)

    new_messages_placeholder = st.empty()

    query = st.text_input("Ask questions about your PDF file (in any preferred language):")

    # Preset questions: clicking a button overrides whatever was typed.
    # (Strings intentionally left byte-identical to the originals — they are
    # retrieval queries, so even typos must not be "corrected" silently.)
    preset_questions = (
        "Was genau ist ein Belegarzt?",
        "Wofür wird die Alpha-ID verwendet?",
        "Was sind die Vorteile des ambulanten operierens?",
        "Was kann ich mit dem Prognose-Analyse Toll machen?",
        "Was sagt mir die Farbe der Balken der Bevölkerungsentwicklung?",
        "Ich habe mein Meta Password vergessen, wie kann ich es zurücksetzen?",
    )
    for question in preset_questions:
        if st.button(question):
            query = question

    ask_clicked = st.button("Ask")
    history = st.session_state['chat_history']
    # BUG FIX: require a non-empty query before submitting. Previously an
    # empty query could be sent to the LLM — e.g. "Ask" clicked with no
    # text, or any rerun where the blank input differed from the last
    # stored message — appending blank turns to the history.
    if query and (ask_clicked or not history or query != history[-1][1]):
        history.append(("User", query, "new"))

        loading_message = st.empty()
        loading_message.text('Bot is thinking...')

        VectorStore = load_pdf(pdf_path)
        chain = load_chatbot()
        docs = VectorStore.similarity_search(query=query, k=3)
        with get_openai_callback() as cb:
            response = chain.run(input_documents=docs, question=query)

        history.append(("Bot", response, "new"))

        # Display the two newest messages (user turn + bot reply).
        # NOTE(review): writing to the same placeholder in a loop means only
        # the last message is visible — preserved from the original; confirm
        # whether both turns were meant to show here.
        for sender, msg, status in history[-2:]:
            background_color = "#FFA07A" if status == "new" else "#acf" if sender == "User" else "#caf"
            new_messages_placeholder.markdown(f"<div style='background-color: {background_color}; padding: 10px; border-radius: 10px; margin: 10px;'>{sender}: {msg}</div>", unsafe_allow_html=True)

        # Scroll to the latest response using JavaScript
        st.write("<script>document.getElementById('response').scrollIntoView();</script>", unsafe_allow_html=True)

        loading_message.empty()
        # (Removed the no-op `query = ""` — reassigning the local does not
        # clear an st.text_input widget.)

    # Mark all messages as old after displaying
    st.session_state['chat_history'] = [(sender, msg, "old") for sender, msg, _ in st.session_state['chat_history']]


def display_chat_history(chat_history):
    """Render each (sender, message, status) entry as a colored chat bubble.

    "new" messages get a highlight color; otherwise the color depends on
    whether the sender is the user or the bot.
    """
    for sender, message, status in chat_history:
        if status == "new":
            color = "#FFA07A"
        elif sender == "User":
            color = "#acf"
        else:
            color = "#caf"
        st.markdown(
            f"<div style='background-color: {color}; padding: 10px; border-radius: 10px; margin: 10px;'>{sender}: {message}</div>",
            unsafe_allow_html=True,
        )

# Run the app when executed directly (e.g. `streamlit run <this file>`).
if __name__ == "__main__":
    main()