Update app.py
app.py (CHANGED): the only substantive edit is one sentence appended to the prompt template; every other line of the file is unchanged context.
@@ -1,158 +1,158 @@
 
 import logging
 from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
 from langchain_huggingface import HuggingFaceEmbeddings
 from sentence_transformers import SentenceTransformer
 
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
 from langchain.prompts import PromptTemplate
 from langchain_together import Together
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
 import streamlit as st
 import os
 
 from dotenv import load_dotenv
 import warnings
 logging.basicConfig(level=logging.DEBUG)  # Logs at DEBUG level and above
 logger = logging.getLogger(__name__)
 
 logger.debug("Starting Streamlit app...")
 # Suppress PyTorch FutureWarning
 warnings.filterwarnings("ignore", message="You are using `torch.load` with `weights_only=False`")
 warnings.filterwarnings("ignore", message="Tried to instantiate class '__path__._path'")
 warnings.filterwarnings("ignore", category=FutureWarning)
 # Suppress generic DeprecationWarnings (including LangChain)
 warnings.filterwarnings("ignore", category=DeprecationWarning)
 
 
 
 load_dotenv()
 TOGETHER_AI_API = os.getenv("TOGETHER_AI")
 
 # Streamlit Page Config
 st.set_page_config(page_title="Law4her")
 col1, col2, col3 = st.columns([1, 4, 1])
 with col2:
     st.image(
         "https://res.cloudinary.com/dzzhbgbnp/image/upload/v1736073326/lawforher_logo1_yznqxr.png"
     )
 
 st.markdown(
     """
     <style>
     div.stButton > button:first-child {
         background-color: #ffffff; /* White background */
         color: #000000; /* Black text */
         border: 1px solid #000000; /* Optional: Add a black border */
     }
 
     div.stButton > button:active {
         background-color: #e0e0e0; /* Slightly darker white for active state */
         color: #000000; /* Black text remains the same */
     }
 
     div[data-testid="stStatusWidget"] div button {
         display: none;
     }
     .reportview-container {
         margin-top: -2em;
     }
     #MainMenu {visibility: hidden;}
     .stDeployButton {display:none;}
     footer {visibility: hidden;}
     #stDecoration {display:none;}
     button[title="View fullscreen"]{
         visibility: hidden;}
     </style>
     """,
     unsafe_allow_html=True,
 )
 
 # Reset Conversation
 def reset_conversation():
     st.session_state.messages = [{"role": "assistant", "content": "Hi, how can I help you?"}]
     st.session_state.memory.clear()
 
 # Initialize chat messages and memory
 if "messages" not in st.session_state:
     st.session_state.messages = [{"role": "assistant", "content": "Hi, how can I help you?"}]
 
 if "memory" not in st.session_state:
     st.session_state.memory = ConversationBufferMemory(
         memory_key="chat_history",
         return_messages=True
     )
 
 # Load embeddings and vectorstore
 embeddings = HuggingFaceEmbeddings(
     model_name="nomic-ai/nomic-embed-text-v1",
     model_kwargs={"trust_remote_code": True, "revision": "289f532e14dbbbd5a04753fa58739e9ba766f3c7"},
 )
 
 # Enable dangerous deserialization (safe only if the file is trusted and created by you)
 db = FAISS.load_local("ipc_vector_db", embeddings, allow_dangerous_deserialization=True)
 db_retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 2, "max_length": 512})
 
-prompt_template = """<s>[INST]As a legal chatbot specializing in the Indian Penal Code, provide a concise and accurate answer based on the given context. Avoid unnecessary details or unrelated content. Only respond if the answer can be derived from the provided context; otherwise, say "The information is not available in the provided context."
+prompt_template = """<s>[INST]As a legal chatbot specializing in the Indian Penal Code, provide a concise and accurate answer based on the given context. Avoid unnecessary details or unrelated content. Only respond if the answer can be derived from the provided context; otherwise, say "The information is not available in the provided context." Do not create your own questions and answers.
 CONTEXT: {context}
 CHAT HISTORY: {chat_history}
 QUESTION: {question}
 ANSWER:
 </s>[INST]
 """
 
 prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question", "chat_history"])
 
 # Initialize the Together API
 llm = Together(
     model="mistralai/Mistral-7B-Instruct-v0.2",
     temperature=0.5,
     max_tokens=1024,
     together_api_key=TOGETHER_AI_API,
 )
 
 qa = ConversationalRetrievalChain.from_llm(
     llm=llm,
     memory=st.session_state.memory,
     retriever=db_retriever,
     combine_docs_chain_kwargs={"prompt": prompt},
 )
 
 # Display chat history
 for message in st.session_state.messages:
     with st.chat_message(message.get("role")):
         st.write(message.get("content"))
 
 # User input
 input_prompt = st.chat_input("Ask a legal question about the Indian Penal Code")
 
 if input_prompt:
     with st.chat_message("user"):
         st.write(input_prompt)
 
     st.session_state.messages.append({"role": "user", "content": input_prompt})
 
     with st.chat_message("assistant"):
         with st.status("Thinking 💡...", expanded=True):
             try:
                 # Pass the user question
                 result = qa.invoke(input=input_prompt)
                 full_response = result.get("answer", "")
 
                 # Ensure the answer is a string
                 if isinstance(full_response, list):
                     full_response = " ".join(full_response)
                 elif not isinstance(full_response, str):
                     full_response = str(full_response)
 
                 # Display the response
                 st.session_state.messages.append({"role": "assistant", "content": full_response})
                 st.write(full_response)
 
             except Exception as e:
                 st.error(f"Error occurred: {e}")
 
 # Add reset button
 st.button("Reset All Chat 🔄", on_click=reset_conversation)
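One thing the commit leaves untouched: the template closes with </s>[INST], which does not match the documented Mistral-7B-Instruct chat format of <s>[INST] ... [/INST] followed by the model's reply. Below is a sketch of the conventional ordering, with the same content and only the tags moved; it is not part of the commit, and whether the stray tags measurably hurt answers would need testing against the deployed Space.

prompt_template = """<s>[INST] As a legal chatbot specializing in the Indian Penal Code, provide a concise and accurate answer based on the given context. Avoid unnecessary details or unrelated content. Only respond if the answer can be derived from the provided context; otherwise, say "The information is not available in the provided context." Do not create your own questions and answers.
CONTEXT: {context}
CHAT HISTORY: {chat_history}
QUESTION: {question} [/INST]
ANSWER:"""

Here ANSWER: sits after [/INST] as a completion cue, so the model's generated text starts directly with the answer.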
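The PyPDFLoader, DirectoryLoader, and RecursiveCharacterTextSplitter imports are unused in app.py itself, which suggests the ipc_vector_db index is produced by a separate build step. A minimal sketch of that step follows, assuming the IPC source PDFs live in a local data/ directory (hypothetical path; chunk sizes are illustrative too) and reusing the same pinned embedding model the app loads at query time.

# build_index.py: hypothetical companion script; app.py only consumes its output.
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings

# Load every PDF under data/ (assumed location of the IPC documents).
docs = DirectoryLoader("data", glob="**/*.pdf", loader_cls=PyPDFLoader).load()

# Split into overlapping chunks; sizes are illustrative, not taken from this Space.
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)

# Same model and pinned revision that app.py uses, so query and index vectors match.
embeddings = HuggingFaceEmbeddings(
    model_name="nomic-ai/nomic-embed-text-v1",
    model_kwargs={"trust_remote_code": True, "revision": "289f532e14dbbbd5a04753fa58739e9ba766f3c7"},
)

# Persist under the directory name app.py passes to FAISS.load_local.
FAISS.from_documents(chunks, embeddings).save_local("ipc_vector_db")

Because app.py reloads this index with allow_dangerous_deserialization=True, the ipc_vector_db directory should only ever come from a build step controlled by the Space's author.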