Update app.py
app.py CHANGED
@@ -1,8 +1,8 @@
 """
-Question Answering with Retrieval QA and LangChain Language Models featuring
-This script uses the LangChain Language Model API to answer questions using Retrieval QA
+Question Answering with Retrieval QA and LangChain Language Models featuring FAISS vector stores.
+This script uses the LangChain Language Model API to answer questions using Retrieval QA
+and FAISS vector stores. It also uses the OpenAI API to generate responses.
 """
-
 import os
 import streamlit as st
 from dotenv import load_dotenv
@@ -104,10 +104,6 @@ def get_conversation_chain(vectorstore):
         A conversational retrieval chain for generating responses.
 
     """
-    # llm = HuggingFaceHub(
-    #     repo_id="mistralai/Mistral-7B-Instruct-v0.1",
-    #     model_kwargs={"temperature": 0.5, "max_length": 512},
-    # )
     llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
 
     memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
@@ -143,6 +139,7 @@ def handle_userinput(user_question):
 
 
 def main():
+    """Put it all together"""
     st.set_page_config(
         page_title="Chat with a Bot that tries to answer questions about multiple PDFs",
         page_icon=":books:",
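For readers following the change: the second hunk keeps only the llm and memory lines of get_conversation_chain, so here is a minimal sketch of how the full function plausibly reads after this commit, assuming the standard LangChain ConversationalRetrievalChain.from_llm wiring over the FAISS vectorstore's retriever (the imports and the return statement are assumptions, not shown in the diff):

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

def get_conversation_chain(vectorstore):
    """Build a conversational retrieval chain over the FAISS vectorstore."""
    # These two lines appear verbatim in the diff above.
    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    # Assumption: the chain is assembled from the vectorstore's retriever;
    # this return is not visible in the hunk.
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )

With return_messages=True, the buffer memory feeds prior turns back into the chain as chat messages, which is what lets the chain handle follow-up questions across multiple PDFs.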
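One configuration note: the script imports load_dotenv, so the OpenAI credentials are presumably read from a local .env file rather than hard-coded; a sketch of the assumed setup (the explicit check is illustrative, not in the source):

import os
from dotenv import load_dotenv

load_dotenv()  # pulls OPENAI_API_KEY from a local .env into the environment
if not os.getenv("OPENAI_API_KEY"):
    raise RuntimeError("Set OPENAI_API_KEY in .env; ChatOpenAI reads it from the environment.")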