Create app.py
app.py ADDED
@@ -0,0 +1,171 @@
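# Gradio RAG chatbot over two .docx manuals: FAISS retrieval with BGE
# embeddings, a MiniLM semantic relevance gate, and a Falcon-40B-Instruct
# endpoint restricted to answering from the retrieved documents.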
import gradio as gr
import os
import docx
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint
from langchain_huggingface import HuggingFaceEmbeddings

# Initialize the semantic model used for relevance checks
semantic_model = SentenceTransformer("all-MiniLM-L6-v2")

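# Extract readable text from a .docx file: paragraphs first, then each
# table flattened row by row with " | " separators.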
def extract_text_from_docx(file_path):
    doc = docx.Document(file_path)
    extracted_text = []

    for para in doc.paragraphs:
        if para.text.strip():
            extracted_text.append(para.text.strip())

    for table in doc.tables:
        extracted_text.append("Table Detected:")
        for row in table.rows:
            row_text = [cell.text.strip() for cell in row.cells]
            if any(row_text):
                extracted_text.append(" | ".join(row_text))

    return "\n".join(extracted_text)

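# Load both manuals, split them into overlapping chunks, and tag every
# chunk with its source document so answers can cite it later.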
def load_documents():
    file_paths = {
        "Fastener_Types_Manual": "Fastener_Types_Manual.docx",
        "Manufacturing_Expert_Manual": "Manufacturing Expert Manual.docx"
    }

    all_splits = []

    for doc_name, file_path in file_paths.items():
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"Document not found: {file_path}")

        print(f"Extracting text from {file_path}...")
        full_text = extract_text_from_docx(file_path)

        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
        doc_splits = text_splitter.create_documents([full_text])

        for chunk in doc_splits:
            chunk.metadata = {"source": doc_name}

        all_splits.extend(doc_splits)

    return all_splits

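# Build an in-memory FAISS index over the chunks using BGE embeddings.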
def create_db(splits):
    embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-base-en-v1.5")
    vectordb = FAISS.from_documents(splits, embeddings)
    return vectordb, embeddings

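# Fetch candidate chunks from the retriever, then drop any whose cosine
# similarity to the query embedding falls below MIN_SIMILARITY.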
def retrieve_documents(query, retriever, embeddings):
    query_embedding = np.array(embeddings.embed_query(query)).reshape(1, -1)
    results = retriever.invoke(query)

    if not results:
        return []

    # Embed the retrieved chunks in one batch for scoring
    doc_embeddings = np.array(embeddings.embed_documents([doc.page_content for doc in results]))
    similarity_scores = cosine_similarity(query_embedding, doc_embeddings)[0]

    MIN_SIMILARITY = 0.5
    filtered_results = [(doc, sim) for doc, sim in zip(results, similarity_scores) if sim >= MIN_SIMILARITY]

    print(f"Query: {query}")
    print(f"Retrieved Docs: {[(doc.metadata.get('source', 'Unknown'), sim) for doc, sim in filtered_results]}")

    return [doc for doc, _ in filtered_results] if filtered_results else []

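# Second-stage relevance gate: compare the query against the concatenated
# retrieved text; with normalized embeddings the dot product is cosine similarity.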
def validate_query_semantically(query, retrieved_docs):
    if not retrieved_docs:
        return False

    combined_text = " ".join([doc.page_content for doc in retrieved_docs])
    query_embedding = semantic_model.encode(query, normalize_embeddings=True)
    doc_embedding = semantic_model.encode(combined_text, normalize_embeddings=True)

    similarity_score = np.dot(query_embedding, doc_embedding)
    print(f"Semantic Similarity Score: {similarity_score}")

    return similarity_score >= 0.3

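# Wire up the conversational RAG chain: buffer memory, a top-5 retriever,
# and a Falcon-40B-Instruct endpoint constrained by a strict system prompt.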
def initialize_chatbot(vector_db, embeddings):
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True, output_key='answer')

    retriever = vector_db.as_retriever(search_kwargs={"k": 5})

    system_prompt = """You are an AI assistant that answers questions ONLY based on the provided documents.
- If no relevant documents are retrieved, respond with: "I couldn't find any relevant information."
- If the meaning of the query does not match the retrieved documents, say "I couldn't find any relevant information."
- Do NOT attempt to answer from general knowledge."""

    llm = HuggingFaceEndpoint(
        repo_id="tiiuae/falcon-40b-instruct",
        huggingfacehub_api_token=os.environ.get("HUGGINGFACE_API_TOKEN"),
        temperature=0.1,
        max_new_tokens=400,
        task="text-generation"
    )

    # HuggingFaceEndpoint has no system_prompt parameter, so the instructions
    # are injected through the chain's combine-documents prompt instead
    qa_prompt = PromptTemplate(
        input_variables=["context", "question"],
        template=system_prompt + "\n\nContext:\n{context}\n\nQuestion: {question}\nAnswer:"
    )

    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        return_source_documents=True,
        combine_docs_chain_kwargs={"prompt": qa_prompt},
        verbose=False
    )

    return retriever, qa_chain

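# Answer one user turn: retrieve, validate relevance, query the chain,
# and append the cited answer to the chat history.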
def handle_query(query, history, retriever, qa_chain, embeddings):
    retrieved_docs = retrieve_documents(query, retriever, embeddings)

    if not retrieved_docs or not validate_query_semantically(query, retrieved_docs):
        return history + [(query, "I couldn't find any relevant information.")], ""

    response = qa_chain.invoke({"question": query, "chat_history": history})
    assistant_response = response['answer'].strip()

    assistant_response += f"\n\nSource: {', '.join(set(doc.metadata.get('source', 'Unknown') for doc in retrieved_docs))}"

    history.append((query, assistant_response))
    return history, ""

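# Assemble the Gradio UI and launch the app.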
def demo():
    documents = load_documents()
    vector_db, embeddings = create_db(documents)
    retriever, qa_chain = initialize_chatbot(vector_db, embeddings)

    with gr.Blocks() as app:
        gr.Markdown("### Document Question Answering System")

        chatbot = gr.Chatbot()
        query_input = gr.Textbox(label="Ask a question about the documents")
        query_btn = gr.Button("Submit")

        def user_query_handler(query, history):
            return handle_query(query, history, retriever, qa_chain, embeddings)

        query_btn.click(
            user_query_handler,
            inputs=[query_input, chatbot],
            outputs=[chatbot, query_input]
        )

        query_input.submit(
            user_query_handler,
            inputs=[query_input, chatbot],
            outputs=[chatbot, query_input]
        )

    app.launch()


if __name__ == "__main__":
    demo()