Update app.py
app.py CHANGED
@@ -4,7 +4,6 @@ This script uses the LangChain Language Model API to answer questions using Retr
 and FAISS vector stores. It also uses the Mistral huggingface inference endpoint to
 generate responses.
 """
-
 import os
 import streamlit as st
 from dotenv import load_dotenv
@@ -15,22 +14,9 @@ from langchain.vectorstores import FAISS
 from langchain.chat_models import ChatOpenAI
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
-from langchain.schema import BaseOutputParser, OutputParserException
 from htmlTemplates import css, bot_template, user_template
 from langchain.llms import HuggingFaceHub
 
-class ReferenceOutputParser(BaseOutputParser[ChatGeneration]):
-    def parse(self, text: str) -> ChatGeneration:
-        try:
-            result, references = text.split("References:")
-            return ChatGeneration(
-                result=result.strip(),
-                extra_info={"references": [ref.strip() for ref in references.split("\n") if ref.strip()]}
-            )
-        except ValueError:
-            raise OutputParserException(f"Could not parse output: {text}")
-
-
 def get_pdf_text(pdf_docs):
     text = ""
     for pdf in pdf_docs:
@@ -79,7 +65,7 @@ def get_conversation_chain(vectorstore):
         )
         memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
         conversation_chain = ConversationalRetrievalChain.from_llm(
-            llm=llm, retriever=vectorstore.as_retriever(), memory=memory
+            llm=llm, retriever=vectorstore.as_retriever(), memory=memory
         )
     except Exception as e:
         st.error(f"Error creating conversation chain: {e}")
@@ -95,14 +81,11 @@ def handle_userinput(user_question):
         response = st.session_state.conversation({"question": user_question})
         st.session_state.chat_history = response["chat_history"]
 
-
-
-
-
-
-        st.write("References:")
-        for ref in references:
-            st.write("- " + ref)
+        for i, message in enumerate(st.session_state.chat_history):
+            if i % 2 == 0:
+                st.write("//_^ User: " + message.content)
+            else:
+                st.write("🤖 ChatBot: " + message.content)
     except Exception as e:
         st.error(f"Error handling user input: {e}")
 
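The @@ -15,22 +14,9 @@ hunk deletes ReferenceOutputParser, which could not have worked as written: ChatGeneration is never imported, and langchain's ChatGeneration does not accept result or extra_info keyword arguments. For comparison, a working parser with the same intent might look like the following sketch (illustrative only, not code from this repo; it returns a plain dict instead of ChatGeneration):

from langchain.schema import BaseOutputParser, OutputParserException

class ReferenceOutputParser(BaseOutputParser[dict]):
    """Split an LLM answer into the answer body and a trailing 'References:' list."""

    def parse(self, text: str) -> dict:
        try:
            # Raises ValueError unless "References:" splits the text into exactly two parts.
            result, references = text.split("References:")
        except ValueError:
            raise OutputParserException(f"Could not parse output: {text}")
        return {
            "result": result.strip(),
            "references": [ref.strip() for ref in references.split("\n") if ref.strip()],
        }

parser = ReferenceOutputParser()
print(parser.parse("Answer text\nReferences:\n- doc1.pdf\n- doc2.pdf"))
# {'result': 'Answer text', 'references': ['- doc1.pdf', '- doc2.pdf']}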
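The @@ -79,7 +65,7 @@ hunk shows only the tail of get_conversation_chain, so the LLM setup is out of frame. Based on the HuggingFaceHub import and the Mistral endpoint mentioned in the module docstring, the surrounding function plausibly looks like this sketch (the repo_id and model_kwargs are assumptions, not taken from this commit):

import streamlit as st
from langchain.llms import HuggingFaceHub
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

def get_conversation_chain(vectorstore):
    try:
        # Assumed model id and generation settings; requires HUGGINGFACEHUB_API_TOKEN.
        llm = HuggingFaceHub(
            repo_id="mistralai/Mistral-7B-Instruct-v0.1",
            model_kwargs={"temperature": 0.5, "max_length": 512},
        )
        memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
        conversation_chain = ConversationalRetrievalChain.from_llm(
            llm=llm, retriever=vectorstore.as_retriever(), memory=memory
        )
        return conversation_chain
    except Exception as e:
        st.error(f"Error creating conversation chain: {e}")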
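The rewritten display loop in @@ -95,14 +81,11 @@ keys the speaker off the message index. That works because ConversationBufferMemory(return_messages=True) records each exchange as a HumanMessage followed by an AIMessage, so even indices are always user turns. A minimal standalone check (the sample exchanges are invented):

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Each save_context call appends one HumanMessage and one AIMessage.
memory.save_context({"input": "What does the PDF cover?"}, {"output": "FAISS vector stores."})
memory.save_context({"input": "Summarize it."}, {"output": "A guide to similarity search."})

for i, message in enumerate(memory.load_memory_variables({})["chat_history"]):
    role = "User" if i % 2 == 0 else "ChatBot"
    print(f"{role}: {message.content}")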