sohampawar1030 committed on
Commit cbf0b0a · verified · 1 Parent(s): 6de38a9

Upload 8 files

Files changed (8)
  1. .env +6 -0
  2. Update_tracking.py +169 -0
  3. app.py +17 -0
  4. arial.ttf +0 -0
  5. credentials.json +13 -0
  6. legal_document_analysis.py +636 -0
  7. rag_pipeline.py +76 -0
  8. requirements.txt +24 -0
.env ADDED
@@ -0,0 +1,6 @@
+ EMAIL_PASSWORD=mwvm tluh heuy fowf
+ GROQ_API_KEY=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
+ EMAIL_PASS=mwvm tluh heuy fowf
Update_tracking.py ADDED
@@ -0,0 +1,169 @@
+ import requests
+ from bs4 import BeautifulSoup
+ import time
+ import streamlit as st
+ import threading
+ from datetime import datetime, timedelta
+ import smtplib
+ from email.message import EmailMessage
+ import os
+ from dotenv import load_dotenv
+ from google.oauth2.service_account import Credentials
+ from googleapiclient.discovery import build
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ # Google Sheets Configuration
+ SHEET_ID = '1bZjlA-UJrBhWS2jHlEQ-7nbmDvxpEoKylgxHW51Hhzc'  # Google Sheets ID
+ RANGE = 'Sheet1!A:D'  # The range where the data is appended
+
+ # Predefined list of URLs to track
+ TRACKING_URLS = [
+     "https://gdpr-info.eu/recitals/no-1/"]
+
+ # Event to signal thread termination
+ stop_event = threading.Event()
+
+ # Authenticate Google Sheets API
+ def authenticate_google_sheets():
+     creds = Credentials.from_service_account_file(
+         'credentials.json',
+         scopes=['https://www.googleapis.com/auth/spreadsheets']
+     )
+     service = build('sheets', 'v4', credentials=creds)
+     return service
+
+ # Append data to Google Sheets
+ def append_to_google_sheets(service, url, title, content, timestamp):
+     values = [
+         [url, title, content[:200], timestamp]  # Prepare row to append
+     ]
+     body = {'values': values}
+     try:
+         service.spreadsheets().values().append(
+             spreadsheetId=SHEET_ID,
+             range=RANGE,
+             valueInputOption="RAW",
+             body=body
+         ).execute()
+         st.write(f"Data appended to Google Sheets at {timestamp}.")
+     except Exception as e:
+         st.error(f"Error appending to Google Sheets: {e}")
+
+ # Send email notification
+ def send_email_notification(to_email, url, title, content, timestamp):
+     sender_email = os.getenv("EMAIL_ADDRESS")
+     sender_password = os.getenv("EMAIL_PASSWORD")
+     smtp_server = "smtp.gmail.com"
+     smtp_port = 587
+
+     if not sender_email or not sender_password:
+         st.error("Environment variables not loaded. Check your .env file.")
+         return
+
+     msg = EmailMessage()
+     msg["Subject"] = f"Website Update Notification for {url}"
+     msg["From"] = sender_email
+     msg["To"] = to_email
+     msg.set_content(f"""
+     Website: {url}
+     Title: {title}
+     Content (preview): {content[:200]}...
+     Tracked at: {timestamp}
+     """)
+
+     try:
+         with smtplib.SMTP(smtp_server, smtp_port) as server:
+             server.starttls()
+             server.login(sender_email, sender_password)
+             server.send_message(msg)
+         st.success(f"Notification email sent to {to_email}")
+     except smtplib.SMTPException as e:
+         st.error(f"SMTP Error: {e}")
+
+ # Fetch website data
+ def fetch_website_data(url):
+     try:
+         response = requests.get(url, timeout=10)
+         response.raise_for_status()
+         soup = BeautifulSoup(response.text, 'html.parser')
+         title = soup.title.string.strip() if soup.title else 'No title available'
+         paragraphs = soup.find_all('p')
+         content = ' '.join([p.text.strip() for p in paragraphs]) if paragraphs else 'New Notification available'
+         return title, content
+     except requests.exceptions.RequestException as e:
+         st.error(f"Error fetching website data: {e}")
+         return "Error occurred", "New notification detected. No content available due to an error."
+
+ # Track websites and store updates in Google Sheets
+ def track_websites(urls, recipient_email, interval=60, max_duration=20*60):
+     st.write(f"Started tracking for {recipient_email}")
+     service = authenticate_google_sheets()
+     last_updates = {}  # To track changes in website content
+
+     start_time = datetime.now()  # Record the start time
+     end_time = start_time + timedelta(seconds=max_duration)  # Set end time (20 minutes later)
+
+     while not stop_event.is_set() and datetime.now() < end_time:
+         for url in urls:
+             title, content = fetch_website_data(url)
+             if title and content:
+                 timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+
+                 # Check for updates
+                 if url not in last_updates or last_updates[url] != (title, content):
+                     last_updates[url] = (title, content)
+
+                     # Append to Google Sheets
+                     append_to_google_sheets(service, url, title, content, timestamp)
+
+                     # Send notification email
+                     try:
+                         send_email_notification(recipient_email, url, title, content, timestamp)
+                     except Exception as e:
+                         st.error(f"Error sending email notification: {e}")
+
+         # Wait for the next interval or until stop_event is set
+         stop_event.wait(interval)
+
+     st.write("Stopped tracking after 20 minutes.")
+
+ # Display tracking status
+ def display_tracking_status():
+     st.title("Update Tracking System with Notifications")
+
+     email_input = st.text_input("Enter your email for notifications:")
+
+     # Maintain thread state
+     if "tracking_thread" not in st.session_state:
+         st.session_state["tracking_thread"] = None
+
+     if email_input:
+         # Start tracking
+         if st.button("Tracking"):
+             if st.session_state["tracking_thread"] is None or not st.session_state["tracking_thread"].is_alive():
+                 stop_event.clear()  # Clear the stop flag to allow tracking
+                 thread = threading.Thread(target=track_websites, args=(TRACKING_URLS, email_input), daemon=True)
+                 thread.start()
+                 st.session_state["tracking_thread"] = thread
+                 st.success(f"Notifications will be sent to {email_input}.")
+             else:
+                 st.warning("Tracking Updates is already running.")
+
+         # Stop tracking
+         if st.button("Stop Tracking"):
+             if st.session_state["tracking_thread"] is not None and st.session_state["tracking_thread"].is_alive():
+                 stop_event.set()  # Signal the thread to stop
+                 st.session_state["tracking_thread"].join()  # Wait for the thread to finish
+                 st.session_state["tracking_thread"] = None
+                 st.success("Tracking stopped.")
+             else:
+                 st.warning("No active tracking to stop.")
+
+ # Main function
+ def main():
+     display_tracking_status()
+
+ if __name__ == "__main__":
+     main()
app.py ADDED
@@ -0,0 +1,17 @@
+ import streamlit as st
+ import Update_tracking
+ import legal_document_analysis
+ from rag_pipeline import extract_text_from_pdf, create_vector_store, create_qa_pipeline
+
+ # Streamlit App Navigation
+ def main():
+     st.sidebar.title("Navigation")
+     page = st.sidebar.radio("Choose a page", ["Update Tracking", "Legal Document Analysis"])
+
+     if page == "Update Tracking":
+         Update_tracking.display_tracking_status()  # Ensure the correct function name
+     elif page == "Legal Document Analysis":
+         legal_document_analysis.display_legal_analysis_page()
+
+ if __name__ == "__main__":
+     main()
arial.ttf ADDED
Binary file (915 kB).
 
credentials.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "type": "service_account",
+ "project_id": "graphite-setup-448304-u1",
+ "private_key_id": "c745314b1cd8b8a135a10c4819dabdb0d3d9a552",
+ "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKKXcJFGxBxdan\nBPI+M1js/YcBI3646efu1U88oWG7CrZDYEs6ZMTx46mdsgaiJ3LCmt6NRI691rdt\nOl2rh3KZ/rMLQXyyQvMfePwk4t1f/MFbHK75pD+nUWsUlbXawnGfZXRJxV9gC0F8\nindrqRFXqfrJaCjsexW5WdKEt5FAb4OSRSEAEzO6AC7VOAnME+ctOvzI8pg8Cuaw\nkJf8/0fTN/1Fgmygmj3om16J3gYVmxvaTYjZnrM6hQhvHFvTCkoLYk4DSXhIBxce\nzD/qLgoqwFGjlhrveb8FXErzTXlulAed/R49HJnbSKsL+Nq2guthoYV/j3Bg+TZ4\ngwPdlWQxAgMBAAECggEAC/jb6eud1CDUWxCDzItzqhhDCx4YInkGsVoMTtvDSwWB\nwPh2SAtg7u7DtBkvg6GYrh3oDV9ooSUyuGsfEjoLn0NKROHAUdm9ZmkerIwLXmR8\namoG0jWoB99+WwoZKo7+ldkXI4nG4cwU2RxloVhyNJn2RkBvAP8yjIcdXJr25QUC\nqA7v+grCR2CcxBkbRXoh/BQ+4wJQjT12eW7ybYXgxJb8HK8fWcGWXv8ir+KPNOrL\nONMhUS8rHyJ3i/9i/uwMP61pqmKf5x1gBTM1h7Wr+8tTIbCsrv1D8DSwOqvHrgTb\nDzUDKmpZFBUD0xyKETR5r7xTpje1M/xcTQlOskllyQKBgQDmhxz3kcnEHN88J1Jq\nvCXD4+0pBF+tzNc9ZmSnu2W+EBq5mAMu7sDTGRZtj/GisyYgm8YMybGnh38IPh5f\nOM+dqcXOM68ARi1srkGXPbLOMksAALhx9NVkbAZvm8Y6wIw6S5V/SsRiW8dq0VTM\nW2ncwUfn9gV3jstdAokjsZTM2QKBgQDgf/l8AZO6MHl/lujz480HP1ye/bkxhPnZ\ndsyHJG7zKRjQsqa1Kj006mGVLtcaC+zhNxGpeLrBbK/+OzyiBuM5OX0UXeS8zfIp\nPkXliSNarWIncgTCEwrcZOe1HFhIYYfd8JKebpaMtPOGYs2ZaKALMNXLDAJv5kSe\nrB0y0nabGQKBgEBKLAzrDo7v0l3vRW/Y2ttfpFNNEfXha6GG/ck9XOl9ikZ6G2Eq\nWoJEu9mATE4okhiD+N62HSJwij/dOxEpQdVhdHXGOibka2Sk0EwCe+w/YGU2zjmR\nozVnXwAfPFBERJc5Xw7p5gzcNagXiwOW9tYG3SvWk729B///ZgmbS7k5AoGBAIWX\nwgeoDJe0duiVDgTsHzrb2qsGAylTnzZoOmeFJ7ztTMYMOaX+/7M4cl9LI1dtl0Wh\n9pGptUCbA9O+vmMQxDBuZF81TIOJ7jvawc9In1A7cvhUIj63BDSIm1WsC2DvIOHS\nYf2Dg3UxzOTAcOaEWxCtu9t7Rwv9tAHUG//9O/UpAoGBALNfN69s+vhpPX4Ioq3B\nyv0bAk3QU4NZHnX7d48eFbWSS5Hhbboo/T/0KPcOf7ynOptsLeV+HqLS/WLXsJ5J\npKw7mW7pb0VoLv+JokWeAtIc4c0Ft2EZtvb+6t3GcrrDJsFBshUdwblrP9sl6i2X\nYUd3Ck2TaXpXirfFdUgByhLl\n-----END PRIVATE KEY-----\n",
+ "client_email": "[email protected]",
+ "client_id": "105183693282963617063",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://oauth2.googleapis.com/token",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/sheet-865%40graphite-setup-448304-u1.iam.gserviceaccount.com",
+ "universe_domain": "googleapis.com"
+ }
legal_document_analysis.py ADDED
@@ -0,0 +1,636 @@
+ import os
+ import PyPDF2
+ import streamlit as st
+ from dotenv import load_dotenv
+ from langchain_groq import ChatGroq
+ from docx import Document
+ import matplotlib.pyplot as plt
+ import io
+ import base64
+ import tempfile
+ from email.mime.multipart import MIMEMultipart
+ from email.mime.text import MIMEText
+ from email.mime.application import MIMEApplication
+ import smtplib
+ from fpdf import FPDF
+ import getpass
+ import pandas as pd
+ import seaborn as sns
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ # Check if the GROQ_API_KEY is in the environment variables
+ if not os.environ.get("GROQ_API_KEY"):
+     os.environ["GROQ_API_KEY"] = getpass.getpass("Enter API key for Groq: ")
+
+ # Initialize the model
+ model = ChatGroq(model="llama-3.1-8b-instant", api_key=os.environ.get("GROQ_API_KEY"))
+
+ # Custom CSS for improved aesthetics
+ st.markdown(
+     """
+     <style>
+     .main {
+         background-color: #f0f2f5;
+     }
+     .sidebar .sidebar-content {
+         background-color: #ffffff;
+     }
+     h1 {
+         color: #2C3E50;
+     }
+     h2 {
+         color: #2980B9;
+     }
+     .stButton button {
+         background-color: #2980B9;
+         color: white;
+         border: none;
+         border-radius: 5px;
+         padding: 10px;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True
+ )
+
+ # Function to read PDF content
+ def read_pdf(file):
+     reader = PyPDF2.PdfReader(file)
+     text = ""
+     for page in reader.pages:
+         text += page.extract_text() or ""
+     return text
+
+ # Function to extract text from DOCX files
+ def extract_text_from_docx(file):
+     doc = Document(file)
+     text = "\n".join([paragraph.text for paragraph in doc.paragraphs])
+     return text
+
+ # Function to preprocess text
+ def preprocess_text(text):
+     return " ".join(text.replace("\n", " ").replace("\r", " ").split())
+
+ # Function to chunk large text into smaller parts
+ def chunk_text(text, max_tokens=2000):
+     chunks = []
+     current_chunk = []
+     current_length = 0
+
+     for sentence in text.split(". "):
+         sentence_length = len(sentence.split())
+         if current_length + sentence_length <= max_tokens:
+             current_chunk.append(sentence)
+             current_length += sentence_length
+         else:
+             chunks.append(". ".join(current_chunk))
+             current_chunk = [sentence]
+             current_length = sentence_length
+
+     if current_chunk:
+         chunks.append(". ".join(current_chunk))
+
+     return chunks
+
+ # Function to generate summary for each chunk
+ def generate_summary(text):
+     prompt = f"Please summarize the following content:\n\n{text}"
+     try:
+         response = model.invoke(prompt)
+         if hasattr(response, 'content'):
+             summary = response.content
+         else:
+             summary = str(response)
+         return summary.strip() if summary else "No summary available."
+     except Exception as e:
+         st.error(f"Error generating summary: {str(e)}")
+         return None
+
+ # Function to summarize large texts
+ def summarize_large_text(text, chunk_limit=5000):
+     chunks = chunk_text(text, max_tokens=chunk_limit)
+     summaries = []
+     for chunk in chunks:
+         summary = generate_summary(chunk)
+         if summary:
+             summaries.append(summary)
+     return " ".join(summaries)
+
+ # Function to detect key clauses
+ def detect_key_clauses(text):
+     key_clauses = [
+         {"clause": "confidentiality", "summary": "Confidentiality clauses ensure that sensitive information remains protected."},
+         {"clause": "liability", "summary": "Liability clauses outline the responsibility for damages or losses incurred."},
+         {"clause": "termination", "summary": "Termination clauses specify the conditions under which a contract may be ended."},
+         {"clause": "force majeure", "summary": "Force majeure clauses excuse parties from performance obligations due to unforeseen events."},
+         {"clause": "governing law", "summary": "Governing law clauses specify which jurisdiction's laws will govern the contract."},
+         {"clause": "dispute resolution", "summary": "Dispute resolution clauses specify how conflicts between parties will be resolved."},
+         {"clause": "amendment", "summary": "Amendment clauses outline the process for changing the terms of the contract."},
+         {"clause": "warranty", "summary": "Warranty clauses provide assurances regarding the quality or condition of goods or services."},
+     ]
+
+     detected_clauses = []
+     for clause in key_clauses:
+         if clause["clause"].lower() in text.lower():
+             clause_start = text.lower().find(clause["clause"].lower())
+             context = text[max(clause_start - 50, 0): clause_start + 200]
+             explanation = f"The document mentions '{clause['clause']}' clause. Context: {context.strip()}..."
+             detected_clauses.append({
+                 "clause": clause["clause"].capitalize(),
+                 "summary": clause["summary"],
+                 "explanation": explanation
+             })
+
+     return detected_clauses
+
+ # Function to detect hidden obligations or dependencies
+ def detect_hidden_obligations_or_dependencies(text, summary):
+     hidden_obligations = [
+         {"phrase": "dependent upon", "summary": "This suggests that some action is conditional upon another."},
+         {"phrase": "if", "summary": "This indicates that certain conditions must be met to fulfill the obligation."},
+         {"phrase": "may be required", "summary": "Implies that the party could be obligated to perform an action under specific conditions."},
+         {"phrase": "should", "summary": "Implies a recommendation or requirement, though not explicitly mandatory."},
+         {"phrase": "obligated to", "summary": "Indicates a clear, binding duty to perform an action."},
+     ]
+
+     hidden_dependencies = []
+
+     for item in hidden_obligations:
+         if item["phrase"].lower() in text.lower() or item["phrase"].lower() in summary.lower():
+             phrase_start = text.lower().find(item["phrase"].lower())
+             context = text[max(phrase_start - 50, 0): phrase_start + 200]
+             hidden_dependencies.append({
+                 "phrase": item["phrase"],
+                 "summary": item["summary"],
+                 "context": context.strip()
+             })
+
+     return hidden_dependencies
+
+ # Function to detect risks in the text
+ def detect_risks(text, summary):
+     risk_phrases = [
+         {"phrase": "penalty", "summary": "This indicates financial or legal consequences.", "risk_level": "High"},
+         {"phrase": "liability", "summary": "This suggests potential financial responsibility.", "risk_level": "Medium"},
+         {"phrase": "default", "summary": "This can lead to serious legal consequences.", "risk_level": "High"},
+         {"phrase": "breach", "summary": "This may expose the party to significant penalties.", "risk_level": "High"},
+         {"phrase": "suspension", "summary": "This indicates risks of halting services.", "risk_level": "Medium"},
+         {"phrase": "should", "summary": "This implies a recommendation, which may not be mandatory.", "risk_level": "Low"},
+         {"phrase": "may be required", "summary": "This suggests that obligations could exist under certain conditions.", "risk_level": "Low"},
+         {"phrase": "indemnify", "summary": "This entails a duty to compensate for harm or loss, indicating potential financial risk.", "risk_level": "High"},
+         {"phrase": "termination for cause", "summary": "This indicates a risk of ending the contract due to specific failures.", "risk_level": "High"},
+         {"phrase": "compliance", "summary": "Non-compliance with regulations can lead to legal penalties.", "risk_level": "High"},
+     ]
+
+     detected_risks = []
+
+     for item in risk_phrases:
+         if item["phrase"].lower() in text.lower() or item["phrase"].lower() in summary.lower():
+             phrase_start = text.lower().find(item["phrase"].lower())
+             context = text[max(phrase_start - 50, 0): phrase_start + 200]
+             detected_risks.append({
+                 "phrase": item["phrase"],
+                 "summary": item["summary"],
+                 "context": context.strip(),
+                 "risk_level": item["risk_level"]
+             })
+
+     return detected_risks
+
+ # Function to calculate overall risk score
+ def calculate_overall_risk_score(detected_risks):
+     risk_scores = {
+         "High": 3,
+         "Medium": 2,
+         "Low": 1
+     }
+     total_score = sum(risk_scores.get(risk['risk_level'], 0) for risk in detected_risks)
+     return total_score
+
+ # Function to plot risk assessment matrix
+ def plot_risk_assessment_matrix(detected_risks):
+     likelihood = []
+     impact = []
+
+     for risk in detected_risks:
+         if risk['risk_level'] == 'High':
+             likelihood.append(3)
+             impact.append(3)
+         elif risk['risk_level'] == 'Medium':
+             likelihood.append(2)
+             impact.append(2)
+         elif risk['risk_level'] == 'Low':
+             likelihood.append(1)
+             impact.append(1)
+
+     fig, ax = plt.subplots(figsize=(6, 6))
+     scatter = ax.scatter(likelihood, impact, alpha=0.6)
+
+     ax.set_xticks([1, 2, 3])
+     ax.set_yticks([1, 2, 3])
+     ax.set_xticklabels(['Low', 'Medium', 'High'])
+     ax.set_yticklabels(['Low', 'Medium', 'High'])
+     ax.set_xlabel('Likelihood')
+     ax.set_ylabel('Impact')
+     ax.set_title('Risk Assessment Matrix')
+
+     for i in range(len(detected_risks)):
+         ax.annotate(detected_risks[i]['phrase'], (likelihood[i], impact[i]))
+
+     buf = io.BytesIO()
+     plt.savefig(buf, format="png", bbox_inches='tight')
+     buf.seek(0)
+
+     img_str = base64.b64encode(buf.read()).decode('utf-8')
+     buf.close()
+
+     return img_str
+
+ # Function to plot risk level distribution pie chart
+ def plot_risk_level_distribution(detected_risks):
+     risk_levels = [risk['risk_level'] for risk in detected_risks]
+     level_counts = {level: risk_levels.count(level) for level in set(risk_levels)}
+
+     fig, ax = plt.subplots(figsize=(4, 3))
+     ax.pie(level_counts.values(), labels=level_counts.keys(), autopct='%1.1f%%', startangle=90)
+     ax.axis('equal')
+
+     plt.title("Risk Level Distribution", fontsize=10)
+
+     buf = io.BytesIO()
+     plt.savefig(buf, format="png", bbox_inches='tight')
+     buf.seek(0)
+
+     img_str = base64.b64encode(buf.read()).decode('utf-8')
+     buf.close()
+
+     return img_str
+
+ # Function to plot risks by type bar chart
+ def plot_risks_by_type(detected_risks):
+     risk_phrases = [risk['phrase'] for risk in detected_risks]
+     phrase_counts = {phrase: risk_phrases.count(phrase) for phrase in set(risk_phrases)}
+
+     fig, ax = plt.subplots(figsize=(4, 3))
+     ax.bar(phrase_counts.keys(), phrase_counts.values(), color='lightcoral')
+     plt.xticks(rotation=45, ha='right')
+     ax.set_title("Risks by Type", fontsize=10)
+     ax.set_ylabel("Count")
+
+     buf = io.BytesIO()
+     plt.savefig(buf, format="png", bbox_inches='tight')
+     buf.seek(0)
+
+     img_str = base64.b64encode(buf.read()).decode('utf-8')
+     buf.close()
+
+     return img_str
+
+ # Function to plot stacked bar chart of risks by level
+ def plot_stacked_bar_chart(detected_risks):
+     risk_levels = ['High', 'Medium', 'Low']
+     level_counts = {level: 0 for level in risk_levels}
+
+     for risk in detected_risks:
+         level_counts[risk['risk_level']] += 1
+
+     fig, ax = plt.subplots(figsize=(4, 3))
+     ax.bar(level_counts.keys(), level_counts.values(), color=['#ff9999', '#66b3ff', '#99ff99'])
+     ax.set_title("Stacked Bar Chart of Risks by Level", fontsize=10)
+     ax.set_ylabel("Count")
+
+     buf = io.BytesIO()
+     plt.savefig(buf, format="png", bbox_inches='tight')
+     buf.seek(0)
+
+     img_str = base64.b64encode(buf.read()).decode('utf-8')
+     buf.close()
+
+     return img_str
+
+ # Function to plot risk heatmap
+ def plot_risk_heatmap(detected_risks):
+     risk_data = {'Risk Level': [], 'Count': []}
+
+     for risk in detected_risks:
+         risk_data['Risk Level'].append(risk['risk_level'])
+         risk_data['Count'].append(1)
+
+     df = pd.DataFrame(risk_data)
+     heatmap_data = df.groupby('Risk Level').count().reset_index()
+
+     fig, ax = plt.subplots(figsize=(4, 3))
+     sns.heatmap(heatmap_data.pivot_table(index='Risk Level', values='Count'), annot=True, cmap='YlGnBu', ax=ax)
+     ax.set_title("Risk Heatmap")
+
+     buf = io.BytesIO()
+     plt.savefig(buf, format="png", bbox_inches='tight')
+     buf.seek(0)
+
+     img_str = base64.b64encode(buf.read()).decode('utf-8')
+     buf.close()
+
+     return img_str
+
+ # Function to write a base64-encoded chart to a temporary PNG file
+ # (FPDF 1.7.2's image() expects a file path rather than a file-like object)
+ def base64_to_image(data):
+     tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
+     tmp.write(base64.b64decode(data))
+     tmp.close()
+     return tmp.name
+
+ # Function to generate PDF document with improved aesthetics
+ def generate_pdf_analysis(document_text, summary, detected_clauses, hidden_obligations, detected_risks, risk_assessment_matrix, risk_level_distribution, risks_by_type, stacked_bar_chart, risk_heatmap):
+     pdf = FPDF()
+     pdf.add_page()
+
+     # Set page borders
+     pdf.set_draw_color(0, 0, 0)
+     pdf.rect(5, 5, 200, 287)
+
+     # Add Arial font
+     pdf.add_font("Arial", "", "arial.ttf", uni=True)
+     pdf.set_font("Arial", size=12)
+
+     # Title
+     pdf.set_font("Arial", 'B', 16)
+     pdf.cell(0, 10, 'Legal Document Analysis Report', ln=True, align='C')
+     pdf.ln(10)
+
+     # Executive Summary
+     pdf.set_font("Arial", 'B', 14)
+     pdf.cell(0, 10, 'Executive Summary', ln=True)
+     pdf.set_font("Arial", '', 12)
+     pdf.multi_cell(0, 10, summary)
+     pdf.ln(10)
+
+     # Risks Section
+     pdf.set_font("Arial", 'B', 14)
+     pdf.cell(0, 10, 'Risk Analysis', ln=True)
+     pdf.set_font("Arial", '', 12)
+     for risk in detected_risks:
+         pdf.cell(0, 10, f"{risk['phrase']}: {risk['summary']} (Risk Level: {risk['risk_level']})", ln=True)
+     pdf.ln(10)
+
+     # Add visualizations for risks
+     pdf.image(base64_to_image(risk_assessment_matrix), x=10, y=pdf.get_y(), w=90)
+     pdf.image(base64_to_image(risk_level_distribution), x=110, y=pdf.get_y()-50, w=90)  # Position next to the first image
+     pdf.ln(60)
+
+     pdf.image(base64_to_image(risks_by_type), x=10, y=pdf.get_y(), w=90)
+     pdf.image(base64_to_image(stacked_bar_chart), x=110, y=pdf.get_y()-50, w=90)  # Position next to the previous image
+     pdf.ln(60)
+
+     pdf.image(base64_to_image(risk_heatmap), x=10, y=pdf.get_y(), w=190)  # Fit image to width
+     pdf.ln(10)
+
+     # Footer
+     pdf.set_y(-15)
+     pdf.set_font("Arial", 'I', 8)
+     pdf.cell(0, 10, f'Page {pdf.page_no()}', 0, 0, 'C')
+
+     return pdf
+
+ # Function to handle chatbot interaction
+ def chatbot_query(user_input):
+     try:
+         # Invoke ChatGroq the same way as in generate_summary()
+         response = model.invoke(user_input)
+         if hasattr(response, 'content'):
+             return response.content
+         return str(response)
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ # Function to generate suggestions for improvement
+ def generate_suggestions(text):
+     suggestions = []
+
+     if "shall" in text.lower():
+         suggestions.append("Consider replacing 'shall' with 'must' for clarity.")
+     if "may" in text.lower():
+         suggestions.append("Clarify the conditions under which actions 'may' be taken.")
+     if "if" in text.lower() and "then" not in text.lower():
+         suggestions.append("Ensure conditional statements are clear and complete.")
+     if "not" in text.lower():
+         suggestions.append("Review negative clauses to ensure they are not overly restrictive.")
+
+     return suggestions
+
+ # Function to send feedback via email
+ def send_feedback(feedback_content):
+     sender_email = os.getenv("SENDER_EMAIL")
+     receiver_email = os.getenv("FEEDBACK_EMAIL")
+     password = os.getenv("EMAIL_PASS")
+
+     msg = MIMEMultipart()
+     msg['From'] = sender_email
+     msg['To'] = receiver_email
+     msg['Subject'] = "User Feedback on Legal Document Analysis"
+
+     msg.attach(MIMEText(feedback_content, 'plain'))
+
+     try:
+         with smtplib.SMTP('smtp.gmail.com', 587) as server:
+             server.starttls()
+             server.login(sender_email, password)
+             server.send_message(msg)
+         return True
+     except Exception as e:
+         return False
+
+ # Function to send PDF via email
+ def send_pdf_via_email(pdf_buffer, recipient_email):
+     sender_email = os.getenv("SENDER_EMAIL")
+     password = os.getenv("EMAIL_PASS")
+
+     msg = MIMEMultipart()
+     msg['From'] = sender_email
+     msg['To'] = recipient_email
+     msg['Subject'] = "Legal Document Analysis PDF"
+
+     msg.attach(MIMEText("Please find the attached analysis of your legal document.", 'plain'))
+
+     # Attach the PDF
+     pdf_attachment = io.BytesIO(pdf_buffer.getvalue())
+     pdf_attachment.seek(0)
+     part = MIMEApplication(pdf_attachment.read(), Name='legal_document_analysis.pdf')
+     part['Content-Disposition'] = 'attachment; filename="legal_document_analysis.pdf"'
+     msg.attach(part)
+
+     try:
+         with smtplib.SMTP('smtp.gmail.com', 587) as server:
+             server.starttls()
+             server.login(sender_email, password)
+             server.send_message(msg)
+         return True
+     except Exception as e:
+         return False
+
+ # Function to simulate tracking updates in the document
+ def track_updates(document_text):
+     updates = [
+         {"update": "Updated confidentiality clause.", "suggestion": "Consider specifying the duration of confidentiality."},
+         {"update": "Revised liability limits.", "suggestion": "Ensure the limits are realistic and compliant with regulations."},
+         {"update": "Clarified termination conditions.", "suggestion": "Check if all potential termination scenarios are covered."},
+     ]
+     return updates
+
+ # Function to get suggestion from Groq API based on the update
+ def get_update_suggestion(update):
+     prompt = f"Suggest improvements or updates for this legal clause: {update}"
+     suggestion = generate_summary(prompt)
+     return suggestion if suggestion else "No suggestion available."
+
+ # Function to display feedback form
+ def display_feedback_form():
+     st.subheader("Feedback Form")
+     feedback = st.text_area("Please provide your feedback or suggestions:")
+
+     question1 = st.radio("How would you rate the analysis?", ("Excellent", "Good", "Fair", "Poor"))
+     question2 = st.radio("Would you recommend this tool to others?", ("Yes", "No"))
+
+     if st.button("Submit Feedback"):
+         feedback_content = f"Feedback: {feedback}\nRating: {question1}\nRecommendation: {question2}"
+         if send_feedback(feedback_content):
+             st.success("Thank you for your feedback! It has been sent.")
+         else:
+             st.error("Failed to send feedback. Please try again later.")
+
+ # Main function to display the legal analysis page
+ def display_legal_analysis_page():
+     st.title("📜 Legal Document Analysis with Groq API")
+
+     uploaded_file = st.file_uploader("Upload your legal document (PDF or DOCX)", type=["pdf", "docx"])
+     if uploaded_file:
+         if uploaded_file.name.endswith(".pdf"):
+             document_text = preprocess_text(read_pdf(uploaded_file))
+         elif uploaded_file.name.endswith(".docx"):
+             document_text = preprocess_text(extract_text_from_docx(uploaded_file))
+         else:
+             st.error("Unsupported file type!")
+             return
+
+         tabs = st.tabs(["📄 Document Text", "🔍 Summary", "🔑 Key Clauses", "🔒 Hidden Obligations", "⚠ Risk Analysis", "💡 Suggestions & Chatbot", "🔄 Update Tracker"])
+
+         with tabs[0]:
+             st.subheader("Document Text")
+             st.write(document_text)
+
+         with tabs[1]:
+             st.subheader("Summary")
+             summary = summarize_large_text(document_text)
+             st.write(summary)
+
+         with tabs[2]:
+             st.subheader("Key Clauses Identified")
+             detected_clauses = detect_key_clauses(document_text)
+             if detected_clauses:
+                 for clause in detected_clauses:
+                     with st.expander(clause['clause'], expanded=False):
+                         st.write(f"*Summary:* {clause['summary']}")
+                         st.write(f"*Context:* {clause['explanation']}")
+             else:
+                 st.write("No key clauses detected.")
+
+         with tabs[3]:
+             st.subheader("Hidden Obligations and Dependencies")
+             hidden_obligations = detect_hidden_obligations_or_dependencies(document_text, summary)
+             if hidden_obligations:
+                 for obligation in hidden_obligations:
+                     st.write(f"{obligation['phrase']}: {obligation['summary']}")
+                     st.write(obligation['context'])
+             else:
+                 st.write("No hidden obligations detected.")
+
+         with tabs[4]:
+             st.subheader("Risk Analysis")
+             detected_risks = detect_risks(document_text, summary)
+             overall_risk_score = calculate_overall_risk_score(detected_risks)
+
+             st.write(f"*Overall Risk Score:* {overall_risk_score}")
+
+             if detected_risks:
+                 for risk in detected_risks:
+                     with st.expander(risk['phrase'], expanded=False):
+                         st.write(f"*Summary:* {risk['summary']} (Risk Level: {risk['risk_level']})")
+                         short_context = risk['context'].strip().split('. ')[0] + '.'
+                         st.write(f"*Context:* {short_context}")
+             else:
+                 st.write("No risks detected.")
+
+             # Generate all visualizations
+             risk_assessment_matrix = plot_risk_assessment_matrix(detected_risks)
+             risk_level_distribution = plot_risk_level_distribution(detected_risks)
+             risks_by_type = plot_risks_by_type(detected_risks)
+             stacked_bar_chart = plot_stacked_bar_chart(detected_risks)
+             risk_heatmap = plot_risk_heatmap(detected_risks)
+
+             # Display the charts
+             st.image(f"data:image/png;base64,{risk_assessment_matrix}", caption="Risk Assessment Matrix")
+             st.image(f"data:image/png;base64,{risk_level_distribution}", caption="Risk Level Distribution")
+             st.image(f"data:image/png;base64,{risks_by_type}", caption="Risks by Type")
+             st.image(f"data:image/png;base64,{stacked_bar_chart}", caption="Stacked Bar Chart of Risks by Level")
+             st.image(f"data:image/png;base64,{risk_heatmap}", caption="Risk Heatmap")
+
+         with tabs[5]:
+             st.subheader("Suggestions for Improvement")
+             suggestions = generate_suggestions(document_text)
+             for suggestion in suggestions:
+                 st.write(f"- {suggestion}")
+
+             st.subheader("Chatbot for Analysis")
+             user_input = st.text_input("Ask the chatbot about your document:")
+             if st.button("Send"):
+                 if user_input:
+                     chatbot_response = chatbot_query(user_input)
+                     st.write("*Chatbot Response:*")
+                     st.write(chatbot_response)
+                 else:
+                     st.warning("Please enter a question.")
+
+             # Download PDF Analysis Button
+             st.subheader("Download Analysis as PDF")
+             pdf = generate_pdf_analysis(document_text, summary, detected_clauses, hidden_obligations, detected_risks, risk_assessment_matrix, risk_level_distribution, risks_by_type, stacked_bar_chart, risk_heatmap)
+             # FPDF 1.7.2 cannot write into a BytesIO directly; return the PDF as a string and wrap it
+             pdf_output = pdf.output(dest='S')
+             pdf_buffer = io.BytesIO(pdf_output.encode('latin-1') if isinstance(pdf_output, str) else pdf_output)
+             pdf_buffer.seek(0)
+
+             # Add download button for PDF
+             st.download_button(
+                 label="Download PDF Analysis",
+                 data=pdf_buffer,
+                 file_name="legal_document_analysis.pdf",
+                 mime="application/pdf"
+             )
+
+             # Input for recipient email
+             recipient_email = st.text_input("Enter your email address to receive the PDF:")
+
+             # Button to send PDF via email
+             if st.button("Send PDF Analysis"):
+                 if recipient_email:
+                     if send_pdf_via_email(pdf_buffer, recipient_email):
+                         st.success("PDF has been sent successfully!")
+                     else:
+                         st.error("Failed to send PDF. Please try again.")
+                 else:
+                     st.warning("Please enter a valid email address.")
+
+             # Feedback Form Section
+             display_feedback_form()
+
+         with tabs[6]:  # Update Tracker Tab
+             st.subheader("Document Updates")
+             updates = track_updates(document_text)
+             if st.button("Show Updates"):
+                 if updates:
+                     for update in updates:
+                         with st.expander(update['update'], expanded=False):
+                             suggestion = get_update_suggestion(update['update'])
+                             st.write(f"*Suggestion:* {suggestion}")
+                 else:
+                     st.write("No updates detected.")
+
+ # Run the application
+ if __name__ == "__main__":
+     display_legal_analysis_page()
rag_pipeline.py ADDED
@@ -0,0 +1,76 @@
+ import os
+ from dotenv import load_dotenv
+ from transformers import pipeline
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.vectorstores import FAISS
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.chains import RetrievalQA
+ from langchain.llms import HuggingFaceHub
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ def extract_text_from_pdf(pdf_file):
+     """Extracts text from a PDF file."""
+     reader = PdfReader(pdf_file)
+     text = ""
+     for page in reader.pages:
+         text += page.extract_text() or ""
+     return text
+
+ def create_vector_store(text, embeddings_model="sentence-transformers/all-MiniLM-L6-v2"):
+     """Creates a FAISS vector store from the input text."""
+     text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+     texts = text_splitter.split_text(text)
+     embeddings = HuggingFaceEmbeddings(model_name=embeddings_model)
+     return FAISS.from_texts(texts, embeddings)
+
+ def create_qa_pipeline(vector_store, llm_model="EleutherAI/gpt-neo-2.7B"):
+     """Creates a Retrieval-based Question-Answering pipeline."""
+
+     # Get the Hugging Face API token from the environment variable
+     huggingfacehub_api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+
+     if huggingfacehub_api_token is None:
+         raise ValueError("HuggingFace Hub API token is missing! Please set 'HUGGINGFACEHUB_API_TOKEN' in your .env file.")
+
+     retriever = vector_store.as_retriever()
+
+     # Initialize the Hugging Face LLM with the API token
+     llm = HuggingFaceHub(
+         repo_id=llm_model,  # the model repository (e.g., EleutherAI/gpt-neo-2.7B)
+         huggingfacehub_api_token=huggingfacehub_api_token,
+         task="text-generation"  # the task to run (text generation for language models)
+     )
+
+     return RetrievalQA.from_chain_type(llm, retriever=retriever)
+
+ def process_pdf_and_answer(pdf_path):
+     """Processes the PDF and returns the key information extracted from it."""
+
+     # Extract text from the PDF
+     text = extract_text_from_pdf(pdf_path)
+
+     # Create a FAISS vector store
+     vector_store = create_vector_store(text)
+
+     # Create a QA pipeline
+     qa_pipeline = create_qa_pipeline(vector_store)
+
+     # No interactive question is needed; ask the chain to extract the key information
+     answer = qa_pipeline.run("Extract key information from the PDF.")
+     return answer
+
+ if __name__ == "__main__":
+     import argparse
+     parser = argparse.ArgumentParser(description="RAG Pipeline for PDF analysis")
+     parser.add_argument("--pdf", type=str, required=True, help="Path to the PDF file")
+     args = parser.parse_args()
+
+     pdf_path = args.pdf
+
+     # Process the PDF and get results
+     answer = process_pdf_and_answer(pdf_path)
+     print(f"Answer: {answer}")
requirements.txt ADDED
@@ -0,0 +1,24 @@
+ streamlit==1.20.0
+ requests==2.28.1
+ beautifulsoup4==4.11.1
+ python-dotenv==0.20.0
+ PyPDF2==3.0.1  # the code uses the PdfReader API
+ python-docx==0.8.11
+ matplotlib==3.5.1
+ pandas==1.4.1
+ seaborn==0.11.2
+ fpdf==1.7.2
+ langchain==0.0.200
+ langchain-groq==0.1.0
+ transformers==4.26.0
+ sentence-transformers==2.2.2  # needed by HuggingFaceEmbeddings
+ faiss-cpu==1.7.2
+ google-auth==2.15.0
+ google-auth-oauthlib==0.5.3
+ google-api-python-client==2.52.0
+ # smtplib and getpass are part of the Python standard library; no pip packages are required