import json
import streamlit as st
from haystack.document_stores import FAISSDocumentStore
# from haystack.document_stores import InMemoryDocumentStore  # lighter-weight alternative for local experiments
from haystack.nodes import DensePassageRetriever, FARMReader
from haystack.pipelines import ExtractiveQAPipeline

st.title("DPR on Supreme Court Judgements (Capital Gain)")

# One-time indexing step (already run offline, hence commented out): load the
# scraped judgements, convert them to Haystack documents, and write them into
# a FAISS document store.
#
# with open("responses.json", "r") as f:
#     data = json.load(f)
#
# documents = [
#     {
#         "content": doc["text"],
#         "meta": {
#             "name": doc["title"],
#             "url": doc["url"],
#         },
#     }
#     for doc in data
# ]
#
# document_store = FAISSDocumentStore(embedding_dim=768, faiss_index_factory_str="Flat")
# document_store = InMemoryDocumentStore()  # lighter-weight alternative for quick tests
# document_store.write_documents(documents)

# Load the pre-built FAISS index from disk instead of re-indexing on every run.
document_store = FAISSDocumentStore.load("faiss_index")

# DPR retriever with separate encoders for questions and passages
# (Facebook's Natural Questions checkpoints).
retriever = DensePassageRetriever(
    document_store=document_store,
    query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
    passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
)

# One-time embedding step (already run offline, hence commented out):
# document_store.update_embeddings(retriever)
# document_store.save("faiss_index")

# Extractive reader that pulls answer spans out of the retrieved passages.
reader = FARMReader(model_name_or_path="deepset/bert-base-cased-squad2")

# Retriever and reader combined into a standard extractive QA pipeline.
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever)

# query = st.text_input("Enter your query:", "")

# # if query:
# #     with st.spinner("Searching..."):
# #         results = pipeline.run(query=query, params={"Retriever": {"top_k": 5}})
# #         st.write("Results:")
# #         for idx, result in enumerate(results["documents"]):
# #             st.write(f"**{idx + 1}. {result.meta['name']}**")
# #             st.write(f"URL: {result.meta['url']}")
# #             st.write(result.content)
# #             st.write("---")

# query = st.text_input("Enter Question")
query = "What is the subject matter of the petition in the Sadanand S. Varde case?"
result = pipeline.run(query=query, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}})

for answer in result['answers']:
    st.markdown(f"=====================\nAnswer: {answer.answer}\nContext: {answer.context}\nScore: {answer.score}")