import streamlit as st
from agno.agent import Agent
from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader
from agno.vectordb.qdrant import Qdrant
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.models.openai import OpenAIChat
from agno.embedder.openai import OpenAIEmbedder
import tempfile
import os
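
# Streamlit app: a team of agno agents (a legal researcher with DuckDuckGo search,
# a contract analyst, and a legal strategist, coordinated by a team lead) analyzes
# an uploaded legal PDF whose chunks are stored in a Qdrant vector database.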


def init_session_state():
    """Initialize session state variables"""
    if 'openai_api_key' not in st.session_state:
        st.session_state.openai_api_key = None
    if 'qdrant_api_key' not in st.session_state:
        st.session_state.qdrant_api_key = None
    if 'qdrant_url' not in st.session_state:
        st.session_state.qdrant_url = None
    if 'vector_db' not in st.session_state:
        st.session_state.vector_db = None
    if 'legal_team' not in st.session_state:
        st.session_state.legal_team = None
    if 'knowledge_base' not in st.session_state:
        st.session_state.knowledge_base = None


def init_qdrant():
    """Initialize Qdrant vector database"""
    if not st.session_state.qdrant_api_key:
        raise ValueError("Qdrant API key not provided")
    if not st.session_state.qdrant_url:
        raise ValueError("Qdrant URL not provided")
    return Qdrant(
        collection="apaai",
        url=st.session_state.qdrant_url,
        api_key=st.session_state.qdrant_api_key,
        https=True,
        timeout=None,
        distance="cosine"
    )


def process_document(uploaded_file, vector_db: Qdrant):
    """Process document, create embeddings and store in Qdrant vector database"""
    if not st.session_state.openai_api_key:
        raise ValueError("OpenAI API key not provided")
    os.environ['OPENAI_API_KEY'] = st.session_state.openai_api_key
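
    # Write the uploaded file into a temporary directory so the PDF reader can
    # load it from a filesystem path.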
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_file_path = os.path.join(temp_dir, uploaded_file.name)
        with open(temp_file_path, "wb") as f:
            f.write(uploaded_file.getbuffer())

        try:
            embedder = OpenAIEmbedder(
                api_key=st.session_state.openai_api_key
            )

            # Create the knowledge base with an explicit Qdrant configuration
            knowledge_base = PDFKnowledgeBase(
                path=temp_dir,
                vector_db=vector_db,
                reader=PDFReader(chunk=True),
                embedder=embedder,
                recreate_vector_db=True
            )
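            # load() reads and chunks the PDF, then loads the embedded chunks
            # into the Qdrant collection.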
            knowledge_base.load()
            return knowledge_base
        except Exception as e:
            raise Exception(f"Error processing document: {str(e)}")


def main():
    st.set_page_config(page_title="Legal Document Analyzer", layout="wide")
    init_session_state()

    st.title("AI Legal Agent Team 👨‍⚖️")

    with st.sidebar:
        st.header("🔑 API Configuration")

        openai_key = st.text_input(
            "OpenAI API Key",
            type="password",
            value=st.session_state.openai_api_key if st.session_state.openai_api_key else "",
            help="Enter your OpenAI API key"
        )
        if openai_key:
            st.session_state.openai_api_key = openai_key

        qdrant_key = st.text_input(
            "Qdrant API Key",
            type="password",
            value=st.session_state.qdrant_api_key if st.session_state.qdrant_api_key else "",
            help="Enter your Qdrant API key"
        )
        if qdrant_key:
            st.session_state.qdrant_api_key = qdrant_key

        qdrant_url = st.text_input(
            "Qdrant URL",
            value=st.session_state.qdrant_url if st.session_state.qdrant_url else "https://f912a67e-0c79-4bbd-9c37-05745c0e7663.us-east-1-0.aws.cloud.qdrant.io:6333",
            help="Enter your Qdrant instance URL"
        )
        if qdrant_url:
            st.session_state.qdrant_url = qdrant_url
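
        # Connect to Qdrant once both the API key and URL are provided and cache
        # the client in session state so it persists across Streamlit reruns.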
        if all([st.session_state.qdrant_api_key, st.session_state.qdrant_url]):
            try:
                if not st.session_state.vector_db:
                    st.session_state.vector_db = init_qdrant()
                    st.success("Successfully connected to Qdrant!")
            except Exception as e:
                st.error(f"Failed to connect to Qdrant: {str(e)}")

        st.divider()

        if all([st.session_state.openai_api_key, st.session_state.vector_db]):
            st.header("📄 Document Upload")
            uploaded_file = st.file_uploader("Upload Legal Document", type=['pdf'])
            if uploaded_file:
                with st.spinner("Processing document..."):
                    try:
                        knowledge_base = process_document(uploaded_file, st.session_state.vector_db)
                        st.session_state.knowledge_base = knowledge_base

                        # Initialize agents
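                        # Each specialist shares the document knowledge base;
                        # the Legal Researcher also gets DuckDuckGo web search.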
                        legal_researcher = Agent(
                            name="Legal Researcher",
                            role="Legal research specialist",
                            model=OpenAIChat(),
                            tools=[DuckDuckGoTools()],
                            knowledge=st.session_state.knowledge_base,
                            search_knowledge=True,
                            instructions=[
                                "Find and cite relevant legal cases and precedents",
                                "Provide detailed research summaries with sources",
                                "Reference specific sections from the uploaded document",
                                "Always search the knowledge base for relevant information"
                            ],
                            show_tool_calls=True,
                            markdown=True
                        )

                        contract_analyst = Agent(
                            name="Contract Analyst",
                            role="Contract analysis specialist",
                            model=OpenAIChat(),
                            knowledge=knowledge_base,
                            search_knowledge=True,
                            instructions=[
                                "Review contracts thoroughly",
                                "Identify key terms and potential issues",
                                "Reference specific clauses from the document"
                            ],
                            markdown=True
                        )

                        legal_strategist = Agent(
                            name="Legal Strategist",
                            role="Legal strategy specialist",
                            model=OpenAIChat(),
                            knowledge=knowledge_base,
                            search_knowledge=True,
                            instructions=[
                                "Develop comprehensive legal strategies",
                                "Provide actionable recommendations",
                                "Consider both risks and opportunities"
                            ],
                            markdown=True
                        )

                        # Legal Agent Team
                        st.session_state.legal_team = Agent(
                            name="Legal Team Lead",
                            role="Legal team coordinator",
                            model=OpenAIChat(),
                            team=[legal_researcher, contract_analyst, legal_strategist],
                            knowledge=st.session_state.knowledge_base,
                            search_knowledge=True,
                            instructions=[
                                "Coordinate analysis between team members",
                                "Provide comprehensive responses",
                                "Ensure all recommendations are properly sourced",
                                "Reference specific parts of the uploaded document",
                                "Always search the knowledge base before delegating tasks"
                            ],
                            show_tool_calls=True,
                            markdown=True
                        )

                        st.success("✅ Document processed and team initialized!")
                    except Exception as e:
                        st.error(f"Error processing document: {str(e)}")

            st.divider()
            st.header("🔍 Analysis Options")
            analysis_type = st.selectbox(
                "Select Analysis Type",
                [
                    "Contract Review",
                    "Legal Research",
                    "Risk Assessment",
                    "Compliance Check",
                    "Custom Query"
                ]
            )
        else:
            st.warning("Please configure all API credentials to proceed")

    # Main content area
    if not all([st.session_state.openai_api_key, st.session_state.vector_db]):
        st.info("👈 Please configure your API credentials in the sidebar to begin")
    elif not uploaded_file:
        st.info("📄 Please upload a legal document to begin analysis")
    elif st.session_state.legal_team:
        # Create a dictionary for analysis type icons
        analysis_icons = {
            "Contract Review": "📑",
            "Legal Research": "🔍",
            "Risk Assessment": "⚠️",
            "Compliance Check": "✅",
            "Custom Query": "💭"
        }

        # Dynamic header with icon
        st.header(f"{analysis_icons[analysis_type]} {analysis_type} Analysis")
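
        # Each analysis type maps to a predefined query, the agents it involves,
        # and a short description shown to the user.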
        analysis_configs = {
            "Contract Review": {
                "query": "Review this contract and identify key terms, obligations, and potential issues.",
                "agents": ["Contract Analyst"],
                "description": "Detailed contract analysis focusing on terms and obligations"
            },
            "Legal Research": {
                "query": "Research relevant cases and precedents related to this document.",
                "agents": ["Legal Researcher"],
                "description": "Research on relevant legal cases and precedents"
            },
            "Risk Assessment": {
                "query": "Analyze potential legal risks and liabilities in this document.",
                "agents": ["Contract Analyst", "Legal Strategist"],
                "description": "Combined risk analysis and strategic assessment"
            },
            "Compliance Check": {
                "query": "Check this document for regulatory compliance issues.",
                "agents": ["Legal Researcher", "Contract Analyst", "Legal Strategist"],
                "description": "Comprehensive compliance analysis"
            },
            "Custom Query": {
                "query": None,
                "agents": ["Legal Researcher", "Contract Analyst", "Legal Strategist"],
                "description": "Custom analysis using all available agents"
            }
        }

        st.info(f"📋 {analysis_configs[analysis_type]['description']}")
        st.write(f"🤖 Active Legal AI Agents: {', '.join(analysis_configs[analysis_type]['agents'])}")

        # Only the Custom Query analysis type takes free-form user input
        if analysis_type == "Custom Query":
            user_query = st.text_area(
                "Enter your specific query:",
                help="Add any specific questions or points you want to analyze"
            )
        else:
            user_query = None  # Set to None for non-custom queries

        if st.button("Analyze"):
            if analysis_type == "Custom Query" and not user_query:
                st.warning("Please enter a query")
            else:
                with st.spinner("Analyzing document..."):
                    try:
                        # Ensure OpenAI API key is set
                        os.environ['OPENAI_API_KEY'] = st.session_state.openai_api_key

                        # Combine predefined and user queries
                        if analysis_type != "Custom Query":
                            combined_query = f"""
                            Using the uploaded document as reference:
                            Primary Analysis Task: {analysis_configs[analysis_type]['query']}
                            Focus Areas: {', '.join(analysis_configs[analysis_type]['agents'])}
                            Please search the knowledge base and provide specific references from the document.
                            """
                        else:
                            combined_query = f"""
                            Using the uploaded document as reference:
                            {user_query}
                            Please search the knowledge base and provide specific references from the document.
                            Focus Areas: {', '.join(analysis_configs[analysis_type]['agents'])}
                            """

                        response = st.session_state.legal_team.run(combined_query)
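
                        # The first run above produces the full analysis; the Key Points and
                        # Recommendations tabs below each issue a follow-up run over that output.
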
                        # Display results in tabs
                        tabs = st.tabs(["Analysis", "Key Points", "Recommendations"])

                        with tabs[0]:
                            st.markdown("### Detailed Analysis")
                            if response.content:
                                st.markdown(response.content)
                            else:
                                for message in response.messages:
                                    if message.role == 'assistant' and message.content:
                                        st.markdown(message.content)

                        with tabs[1]:
                            st.markdown("### Key Points")
                            key_points_response = st.session_state.legal_team.run(
                                f"""Based on this previous analysis:
                                {response.content}
                                Please summarize the key points in bullet points.
                                Focus on insights from: {', '.join(analysis_configs[analysis_type]['agents'])}"""
                            )
                            if key_points_response.content:
                                st.markdown(key_points_response.content)
                            else:
                                for message in key_points_response.messages:
                                    if message.role == 'assistant' and message.content:
                                        st.markdown(message.content)

                        with tabs[2]:
                            st.markdown("### Recommendations")
                            recommendations_response = st.session_state.legal_team.run(
                                f"""Based on this previous analysis:
                                {response.content}
                                What are your key recommendations based on the analysis, and what is the best course of action?
                                Provide specific recommendations from: {', '.join(analysis_configs[analysis_type]['agents'])}"""
                            )
                            if recommendations_response.content:
                                st.markdown(recommendations_response.content)
                            else:
                                for message in recommendations_response.messages:
                                    if message.role == 'assistant' and message.content:
                                        st.markdown(message.content)
                    except Exception as e:
                        st.error(f"Error during analysis: {str(e)}")

    else:
        st.info("Please upload a legal document to begin analysis")


if __name__ == "__main__":
    main()