# NOTE: the original header lines here were Hugging Face Spaces page chrome
# (error banners, file size, commit hashes, and a line-number gutter) captured
# by the scrape; they were never part of the program and are preserved only as
# this comment so the file can parse.
# Standard library imports
import io
import os
import re
import time
from io import StringIO
from typing import Any, Dict, List

# Third-party imports
import openai
import streamlit as st
from langchain import LLMChain, OpenAI
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import RetrievalQA
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI  # rebinds `OpenAI` from the `langchain` import above (same class)
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import VectorStore
from langchain.vectorstores.faiss import FAISS
from pypdf import PdfReader

# NOTE(review): removed `os.system("pip install --upgrade pip")` — shelling out
# to pip at import time is slow, non-deterministic, and installs nothing the app
# needs; dependencies belong in requirements.txt.
@st.cache_data
def parse_pdf(file: io.BytesIO) -> List[str]:
    """Extract and lightly clean the text of every page of a PDF.

    Args:
        file: Binary file-like object containing the PDF (e.g. a Streamlit
            ``UploadedFile``).

    Returns:
        One cleaned text string per page, in page order.
    """
    pdf = PdfReader(file)
    output = []
    for page in pdf.pages:
        text = page.extract_text()
        # Merge words hyphenated across a line break: "exam-\nple" -> "example".
        # BUG FIX: the replacement must be a raw string — in a plain literal,
        # "\1\2" is the control characters \x01\x02, not backreferences.
        text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
        # Join single newlines in the middle of sentences into spaces.
        text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
        # Collapse runs of blank lines into a single paragraph break.
        text = re.sub(r"\n\s*\n", "\n\n", text)
        output.append(text)
    return output
@st.cache_data
def text_to_docs(text: str) -> List[Document]:
    """Convert a string or list of page strings into chunked ``Document``s.

    Each page is split into chunks of up to 4000 characters; every chunk
    carries ``page`` (1-based), ``chunk`` (0-based index within the page) and
    ``source`` (``"page-chunk"``) metadata.

    Args:
        text: A single page string, or a list of per-page strings.

    Returns:
        A list of ``Document`` chunks with metadata attached.
    """
    if isinstance(text, str):
        # Treat a bare string as a one-page document.
        text = [text]
    page_docs = [Document(page_content=page) for page in text]

    # Number the pages 1-based.
    # BUG FIX: was `1 + 1`, which stamped every page as page 2.
    for i, doc in enumerate(page_docs):
        doc.metadata["page"] = i + 1

    # Split each page into chunks, preserving the page number and adding a
    # per-page chunk index plus a combined "page-chunk" source id.
    doc_chunks = []
    for page_doc in page_docs:
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=4000,
            separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
            chunk_overlap=0,
        )
        chunks = text_splitter.split_text(page_doc.page_content)
        for chunk_idx, chunk in enumerate(chunks):
            # BUG FIX: the chunk index was hard-coded to 1, so every chunk of
            # a page shared the same source id. Also renamed the inner loop
            # variables, which previously shadowed `i` and `doc`.
            chunk_doc = Document(
                page_content=chunk,
                metadata={"page": page_doc.metadata["page"], "chunk": chunk_idx},
            )
            chunk_doc.metadata["source"] = (
                f"{chunk_doc.metadata['page']}-{chunk_doc.metadata['chunk']}"
            )
            doc_chunks.append(chunk_doc)
    return doc_chunks
# ---------------------------------------------------------------------------
# Streamlit app: upload a PDF, index it into FAISS, and chat with it through a
# zero-shot agent that wraps a RetrievalQA tool.
# NOTE(review): the scrape of this file lost all indentation; the nesting below
# is reconstructed — everything past the upload depends on `pages`, so it must
# live inside the `if uploaded_file:` branch. Confirm against the original app.
# ---------------------------------------------------------------------------
uploaded_file = st.sidebar.file_uploader(":blue[Upload]", type=["pdf"])
if uploaded_file:
    # Parse the PDF into per-page text, then into chunked Documents.
    doc = parse_pdf(uploaded_file)
    pages = text_to_docs(doc)

    if pages:
        # Optional preview of any single parsed chunk.
        with st.expander('Show page contents', expanded=False):
            page_sel = st.number_input(
                label="selected page", min_value=1, max_value=len(pages), step=1
            )
            st.write(pages[page_sel - 1])

    # The OpenAI key is supplied by the user; nothing below runs without it.
    api = st.sidebar.text_input(
        "Open api key",
        type="password",
        placeholder="sk-",
        help="https://platform.openai.com/account/api-keys",
    )
    if api:
        embeddings = OpenAIEmbeddings(openai_api_key=api)

        # Index the chunks into an in-memory FAISS vector store.
        with st.spinner("It's indexing. .."):
            index = FAISS.from_documents(pages, embeddings)

        # Retrieval QA chain that "stuffs" retrieved chunks into the prompt.
        qa = RetrievalQA.from_chain_type(
            llm=OpenAI(openai_api_key=api),
            chain_type="stuff",
            retriever=index.as_retriever(),
        )

        # Expose the QA chain as the agent's single tool.
        tools = [
            Tool(
                name="State of Union QA System",
                func=qa.run,
                description="Useful for when you need to answer questions about the aspects asked. Input may be a partial or fully formed question."
            )
        ]

        # BUG FIX: the original prefix/suffix literals each leaked a stray
        # double quote into the prompt ( """"Have... and Begin!" ).
        prefix = """Have a conversation with a human, answering the following questions as best you can based on the context and memory available.
You have access to a single tool:"""
        suffix = """Begin!
{chat_history}
Question: {input}
{agent_scratchpad}"""

        prompt = ZeroShotAgent.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            input_variables=["input", "chat_history", "agent_scratchpad"],
        )

        # Conversation memory must survive Streamlit reruns -> session_state.
        if "memory" not in st.session_state:
            st.session_state.memory = ConversationBufferMemory(memory_key="chat_history")

        llm_chain = LLMChain(
            llm=OpenAI(
                temperature=0, openai_api_key=api, model_name="gpt-3.5-turbo"
            ),
            prompt=prompt,
        )
        agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
        agent_chain = AgentExecutor.from_agent_and_tools(
            agent=agent, tools=tools, verbose=True, memory=st.session_state.memory
        )

        container = st.container()
        with container:
            st.title("🤖 AI ChatBot")

            # Chat history also lives in session_state across reruns.
            if "messages" not in st.session_state:
                st.session_state.messages = []

            # Replay the conversation so far on every rerun.
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

            if query := st.chat_input("Hey yo !!! Wazzups!"):
                st.chat_message("user").markdown(query)
                # Record the user turn before running the agent.
                st.session_state.messages.append({"role": "user", "content": query})

                # Run the agent; this may call the QA tool and the LLM.
                with st.spinner("It's indexing. .."):
                    response = agent_chain.run(query)

                # Display and record the assistant turn.
                with st.chat_message("assistant"):
                    st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})