"""BinDocs Chat App — Streamlit front end.

Clones a private Hugging Face dataset repository containing the source PDF,
then renders the sidebar (branding + OpenAI API-key gate).
"""

import os
import pickle
import random

import nltk
import streamlit as st
import streamlit.components.v1 as components
from datasets import load_dataset
from huggingface_hub import Repository
from langchain.callbacks import get_openai_callback
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from nltk.tokenize import sent_tokenize
from PyPDF2 import PdfReader
from streamlit_extras.add_vertical_space import add_vertical_space

from my_component import my_component

# Sentence tokenizer models needed by nltk.tokenize.sent_tokenize.
nltk.download('punkt')

# Step 1: Clone the Dataset Repository
repo = Repository(
    local_dir="Private_Book",               # Local directory to clone the repository into
    repo_type="dataset",                    # This is a dataset repository, not a model repo
    clone_from="Anne31415/Private_Book",    # Source repository on the Hugging Face Hub
    # BUG FIX: was `os.os.getenv["HUB_TOKEN"]` — `os.os` does not exist and
    # `getenv` must be called, not subscripted. Reads the secret auth token.
    token=os.getenv("HUB_TOKEN"),
)
repo.git_pull()  # Pull the latest changes (if any)

# Step 2: Path to the PDF inside the cloned dataset repo
pdf_file_path = "Private_Book/Glossar_PDF_webscraping.pdf"

# Sidebar contents: branding, API-key gate, and marketing blurb.
with st.sidebar:
    st.title(':orange_book: BinDoc GmbH')

    # The app cannot run without an OpenAI key; stop rendering if absent.
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        st.warning('API key is required to proceed.')
        st.stop()

    st.markdown("Experience the future of document interaction with the revolutionary")
    st.markdown("**BinDocs Chat App**.")
    st.markdown("Harnessing the power of a Large Language Model and AI technology,")
    st.markdown("this innovative platform redefines PDF engagement,")
    st.markdown("enabling dynamic conversations that bridge the gap between")
    st.markdown("human and machine intelligence.")

    add_vertical_space(3)  # Add more vertical space between text blocks
    st.write('Made with ❤️ by BinDoc GmbH')
def load_pdf(file_path):
    """Extract page texts from a PDF and build (or load a cached) FAISS index.

    The vector store is cached to ``<basename>.pkl`` next to the working
    directory so embeddings are only computed once per document.

    Args:
        file_path: Path (``str``) to the PDF file.

    Returns:
        A FAISS vector store over the non-empty page texts.
    """
    pdf_reader = PdfReader(file_path)
    chunks = []
    for page in pdf_reader.pages:
        text = page.extract_text()
        if text:  # skip pages with no extractable text
            chunks.append(text)

    # BUG FIX: `file_path` is a plain string (see `pdf_file_path` above), and
    # `str` has no `.name` attribute — `file_path.name[:-4]` raised
    # AttributeError. Derive the cache name from the file's base name instead.
    store_name = os.path.splitext(os.path.basename(file_path))[0]

    if os.path.exists(f"{store_name}.pkl"):
        # NOTE: unpickling is only safe because this app created the file
        # itself; never load pickles from untrusted sources.
        with open(f"{store_name}.pkl", "rb") as f:
            VectorStore = pickle.load(f)
    else:
        embeddings = OpenAIEmbeddings()
        VectorStore = FAISS.from_texts(chunks, embedding=embeddings)
        with open(f"{store_name}.pkl", "wb") as f:
            pickle.dump(VectorStore, f)

    return VectorStore


def load_chatbot(max_tokens=300):
    """Build a 'stuff'-type QA chain backed by a low-temperature OpenAI LLM.

    Args:
        max_tokens: Completion-length cap passed to the OpenAI LLM.

    Returns:
        A langchain question-answering chain.
    """
    return load_qa_chain(
        llm=OpenAI(temperature=0.1, max_tokens=max_tokens),
        chain_type="stuff",
    )


def display_chat_history(chat_history):
    """Render each (speaker, message, status) chat entry as a colored bubble.

    Entries flagged ``"new"`` get a salmon highlight; otherwise the color
    depends on whether the speaker is the user.
    """
    for chat in chat_history:
        background_color = (
            "#FFA07A" if chat[2] == "new"
            else "#acf" if chat[0] == "User"
            else "#caf"
        )
        # NOTE(review): the original source was truncated in the middle of this
        # st.markdown call; the HTML below is a reconstruction of the obvious
        # chat-bubble rendering — confirm against the original file.
        st.markdown(
            f"<div style='background-color: {background_color}; padding: 10px; "
            f"border-radius: 10px; margin: 10px;'>{chat[0]}: {chat[1]}</div>",
            unsafe_allow_html=True,
        )