File size: 2,268 Bytes
c23507d
 
 
 
 
f488983
 
 
 
c23507d
 
 
f488983
c23507d
 
 
f488983
 
 
 
c23507d
 
 
 
 
f488983
c23507d
 
 
 
 
 
 
 
f488983
 
628fc9c
 
 
 
c23507d
 
 
 
 
 
 
 
 
63e8cad
f488983
c6cb50d
63e8cad
 
 
 
 
628fc9c
 
 
 
 
 
 
 
 
 
63e8cad
c23507d
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import os
import streamlit as st

from embedchain import App

# Cache Hugging Face model downloads in a local folder so repeated runs
# reuse them instead of re-downloading.
# NOTE(review): env vars like HF_HOME are usually read at import time by the
# libraries that honor them — confirm nothing above has already resolved it.
os.environ["HF_HOME"] = "./models"


#! PROVIDE HUGGINGFACE TOKEN IF RUNNING OFFLINE


@st.cache_resource
def conversational_ai():
    """Build the embedchain ``App`` that backs the chatbot.

    Decorated with ``st.cache_resource`` so the app (and whatever models /
    vector store it loads) is constructed once and shared across the
    Streamlit reruns triggered by every user interaction.
    """
    config_file = "./config_main.yaml"
    return App.from_config(config_path=config_file)


# Maximum vector-store distance for a retrieved chunk to be treated as
# relevant; chunks scoring above this are discarded (lower score == closer
# match — TODO confirm against the configured embedder/retriever).
RELEVANCE_THRESHOLD = 1.2

st.title('Demo of "AI Chatbot in Law"')
st.caption(
    "🚀 A demo of conversation AI for Dhirubhai Ambani Centre for Technology and Law (DA-CTL) made by **Anurag Shukla**, **Tanaz Pathan** under guidance of **Prof. Prasenjit Majumder**"
)

# Seed the chat history with a greeting the first time this session runs.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": """
        Hi! I'm a conversational AI specializing in Indian Legal System. How may I assist you today?
        """,
        }
    ]

# Replay the conversation so far — Streamlit re-executes this whole script
# on every user event, so the transcript must be re-rendered each time.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Disclaimer: I am still a product in development"):
    app = conversational_ai()

    # Echo the user's message and persist it in the session transcript.
    with st.chat_message("user"):
        st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant"):
        msg_placeholder = st.empty()
        msg_placeholder.markdown("Thinking...")

        print("Querying the Agent.\n")
        # Retrieve candidate contexts and keep only those within the
        # relevance threshold.
        cntxt = app.search(prompt)
        relevant_c = [
            i["context"]
            for i in cntxt
            if i["metadata"]["score"] <= RELEVANCE_THRESHOLD
        ]
        print(
            "\n===================\n",
            *relevant_c,
            sep="\n===================\n",
        )
        if relevant_c:
            full_response = app.llm.query(
                input_query=prompt,
                contexts=relevant_c,
            )
            # The model may echo its prompt template; keep only the text
            # after the final "Answer:" marker (the whole response if the
            # marker is absent, since rpartition then returns it unchanged).
            full_response = full_response.rpartition("Answer:")[-1].strip()
        else:
            # No context cleared the threshold — refuse rather than hallucinate.
            full_response = (
                "Sorry but I don't have relevant knowledge to answer that query."
            )
        print(f"\n#ANSWER\n\n{full_response}")

        # Replace the "Thinking..." placeholder and persist the reply.
        msg_placeholder.markdown(full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )