apahilaj committed · 75f200e
Parent(s): 0e35643

app
app.py CHANGED
@@ -1,24 +1,33 @@
 import gradio as gr
 import pandas as pd
 from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.vectorstores import faiss
-from langchain_community.llms import HuggingFaceHub
-from langchain.chains import
+from langchain.vectorstores import Chroma, faiss
+from langchain_community.llms import HuggingFaceEndpoint, HuggingFaceHub
+from langchain.chains import LLMChain
+from langchain_community.document_loaders.csv_loader import CSVLoader
 from langchain_community.document_loaders import PyPDFLoader
-from langchain.text_splitter import
-from
+from langchain.text_splitter import CharacterTextSplitter
+from langchain_community.document_loaders import TextLoader
+from langchain_community import vectorstores
 from langchain.prompts import PromptTemplate
+from langchain.chains import RetrievalQA
 from langchain.memory import ConversationBufferMemory
-import
+from langchain.chains import ConversationalRetrievalChain
+from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
+from langchain.vectorstores import DocArrayInMemorySearch
+from langchain.document_loaders import TextLoader
+from langchain.chains import RetrievalQA, ConversationalRetrievalChain
+from langchain.memory import ConversationBufferMemory
+from langchain.chat_models import ChatOpenAI
+from langchain.document_loaders import TextLoader
+from langchain.document_loaders import PyPDFLoader
+import panel as pn
+import param
 import re
+import os
 
 api_token = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
 
-memory = ConversationBufferMemory(
-    memory_key="chat_history",
-    return_messages=True
-)
-
 model = HuggingFaceHub(
     huggingfacehub_api_token=api_token,
     repo_id="mistralai/Mistral-7B-Instruct-v0.2",
@@ -53,34 +62,24 @@ def load_db(file, k):
         retriever=retriever,
         return_source_documents=True,
         return_generated_question=True,
-        memory=memory,
     )
 
     return qa
 
-
-qa = load_db(pdf_file, 3)
-print("MEMORY")
-print(memory)
-if not memory.history:
-    # If no previous conversation, start with a greeting
-    response = qa.invoke({"question": "Hi, how can I help you today?", "chat_history": []})
-    memory.update(response["chat_history"])
+chat_history = []  # initialize chat history
 
-
-
-
-
+def greet(question, pdf_file):
+    global chat_history
+    a = load_db(pdf_file, 3)
+    r = a.invoke({"question": question, "chat_history": chat_history})
+    match = re.search(r'Helpful Answer:(.*)', r['answer'])
     if match:
         helpful_answer = match.group(1).strip()
+        # Extend chat history with the current question and answer
+        chat_history.extend([(question, helpful_answer)])
+        return helpful_answer
     else:
-
-
-        # Update the chat history
-        memory.update([(input_text, helpful_answer)])
-
-        return helpful_answer
-
+        return "No helpful answer found."
 
-iface = gr.Interface(fn=
+iface = gr.Interface(fn=greet, inputs=["text", "file"], outputs="text")
 iface.launch(share=True)
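Only the tail of load_db(file, k) is visible in the hunk context above. For orientation, here is a minimal sketch of what the full helper plausibly looks like, assuming a ConversationalRetrievalChain over an in-memory vector index of the uploaded PDF; the loader, splitter, embedding model, chunk sizes, and chain_type below are assumptions for illustration, not part of this commit:

    def load_db(file, k):
        # Load the uploaded PDF and split it into overlapping chunks
        documents = PyPDFLoader(file).load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)  # assumed sizes
        docs = splitter.split_documents(documents)
        # Embed the chunks and index them in an in-memory vector store (assumed)
        embeddings = HuggingFaceEmbeddings()
        db = DocArrayInMemorySearch.from_documents(docs, embeddings)
        retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": k})
        # The three trailing keyword arguments are the ones visible in the diff
        qa = ConversationalRetrievalChain.from_llm(
            llm=model,
            chain_type="stuff",  # assumed
            retriever=retriever,
            return_source_documents=True,
            return_generated_question=True,
        )
        return qa

Because greet calls load_db on every request, the index is rebuilt per question; only the module-level chat_history list persists between calls, which is why this commit can drop the ConversationBufferMemory in favor of the explicit list.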