ZDPLI committed
Commit d3ebdbc · verified · 1 Parent(s): b6218c3

Create app.py

Files changed (1)
  1. app.py +192 -0
app.py ADDED
@@ -0,0 +1,192 @@
+import os
+import logging
+import torch
+import gradio as gr
+from tqdm import tqdm
+from PIL import Image
+from typing import Optional, TypedDict
+
+# LangChain & LangGraph
+from langgraph.graph import StateGraph, START, END
+from langgraph.checkpoint.memory import MemorySaver
+from langchain.tools import tool
+from langchain_community.vectorstores import FAISS
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.document_loaders import PyPDFLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+
+# Web Search
+from duckduckgo_search import DDGS
+
+# Llama GGUF Model Loader
+from llama_cpp import Llama
+
+# ------------------------------
+# 🔹 Setup Logging
+# ------------------------------
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# ------------------------------
+# 🔹 Load GGUF Model with llama-cpp-python
+# ------------------------------
+model_path = "./Bio-Medical-MultiModal-Llama-3-8B-V1.i1-Q6_K.gguf"  # Update with actual GGUF model path
+llm = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=35)  # Optimized for Hugging Face T4 GPU
+
+logger.info("Llama GGUF Model Loaded Successfully.")
+
+# ------------------------------
+# 🔹 Define Expert System Prompts
+# ------------------------------
+GP_PROMPT = "You are a General Practitioner AI Assistant. Answer medical questions with scientifically accurate information."
+RADIOLOGY_PROMPT = "You are a Radiology AI expert. Analyze medical images and provide diagnostic insights."
+WEBSEARCH_PROMPT = "You are a Web Search AI. Retrieve up-to-date medical information."
+
+# ------------------------------
+# 🔹 FAISS Vector Store for RAG
+# ------------------------------
+_vector_store_cache = None
+
+def load_vectorstore(pdf_path="medical_docs.pdf"):
+    """Loads PDF files into a FAISS vector store for RAG."""
+    try:
+        loader = PyPDFLoader(pdf_path)
+        documents = loader.load()
+        text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
+        docs = text_splitter.split_documents(documents)
+        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+        vector_store = FAISS.from_documents(docs, embeddings)
+        logger.info(f"Vector store loaded with {len(docs)} documents.")
+        return vector_store
+    except Exception as e:
+        logger.error(f"Error loading vector store: {str(e)}")
+        return None
+
+def update_vector_store(pdf_file):
+    """Rebuilds the FAISS vector store when a new PDF is uploaded."""
+    try:
+        # gr.File(type="filepath") passes the uploaded file's path on disk,
+        # so the PDF can be loaded directly without copying it first.
+        pdf_path = pdf_file if isinstance(pdf_file, str) else pdf_file.name
+        return load_vectorstore(pdf_path)
+    except Exception as e:
+        logger.error(f"Error updating vector store: {str(e)}")
+        return _vector_store_cache  # Fall back to the cached store
+
+if os.path.exists("medical_docs.pdf"):
+    _vector_store_cache = load_vectorstore("medical_docs.pdf")
+else:
+    _vector_store_cache = None
+
+vector_store = _vector_store_cache
+
+# ------------------------------
+# 🔹 Define AI Tools
+# ------------------------------
+@tool
+def analyze_medical_image(image_path: str):
+    """Analyzes a medical image and returns a diagnostic explanation."""
+    try:
+        image = Image.open(image_path)
+    except Exception as e:
+        logger.error(f"Error opening image: {str(e)}")
+        return "Error processing image."
+
+    # NOTE: a plain llama-cpp text completion cannot see pixels; only basic image
+    # metadata reaches the prompt here. True multimodal inference would need the
+    # model's vision projector via a llama-cpp chat handler.
+    image_info = f"format={image.format}, size={image.size}, mode={image.mode}"
+    output = llm(f"{RADIOLOGY_PROMPT}\nAnalyze this medical image and provide a diagnosis:\n{image_info}", max_tokens=512)
+    return output["choices"][0]["text"]
+
+@tool
+def retrieve_medical_knowledge(query: str):
+    """Retrieves medical knowledge from FAISS vector store."""
+    if vector_store is None:
+        return "No external medical knowledge available."
+
+    retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 3})
+    docs = retriever.get_relevant_documents(query)
+    citations = [f"[{i+1}] {doc.metadata.get('source', 'Unknown Source')}" for i, doc in enumerate(docs)]
+    citations_text = "\n".join(citations)
+    content = "\n".join([doc.page_content for doc in docs])
+    return content + f"\n\n**Citations:**\n{citations_text}"
+
+@tool
+def web_search(query: str):
+    """Performs a real-time web search using DuckDuckGo."""
+    try:
+        # Use the DDGS client imported above (current duckduckgo_search releases no longer ship a `ddg` helper).
+        results = list(DDGS().text(query, max_results=3))
+        summary = "\n".join([f"{r['title']}: {r['body']} ({r['href']})" for r in results]) or "No relevant results found."
+        return summary
+    except Exception as e:
+        logger.error(f"Web search error: {str(e)}")
+        return "Error retrieving web search results."
+
+# ------------------------------
+# 🔹 Define Multi-Agent Workflow (LangGraph)
+# ------------------------------
+class AgentState(TypedDict):
+    """Shared state passed between the expert agents."""
+    query: str
+    response: str
+    image_path: Optional[str]
+    expert: str  # "GP", "Radiology", "Web Search"
+
+# Memory checkpointing
+checkpointer = MemorySaver()
+
+# Create LangGraph state graph
+agent_graph = StateGraph(AgentState)
+
+def route_query(state: AgentState):
+    """Determines which AI expert should handle the query."""
+    if state.get("image_path"):
+        return "radiology_specialist"
+    elif any(word in state["query"].lower() for word in ["latest", "update", "breaking news"]):
+        return "web_search_expert"
+    else:
+        return "general_practitioner"
+
+def general_practitioner(state: AgentState):
+    """GP Expert: Handles medical text queries and retrieves knowledge."""
+    query = state["query"]
+    retrieved_info = retrieve_medical_knowledge.run(query)
+    # max_tokens is set explicitly because llama-cpp's default completion length is very short.
+    output = llm(f"{GP_PROMPT}\nQ: {query}\nA:", max_tokens=512)
+    return {"response": output["choices"][0]["text"] + "\n\n" + retrieved_info, "expert": "GP"}
+
+def radiology_specialist(state: AgentState):
+    """Radiology Expert: Analyzes medical images."""
+    image_analysis = analyze_medical_image.run(state["image_path"])
+    return {"response": image_analysis, "expert": "Radiology"}
+
+def web_search_expert(state: AgentState):
+    """Web Search Expert: Retrieves the latest information."""
+    search_result = web_search.run(state["query"])
+    return {"response": search_result, "expert": "Web Search"}
+
+# Wire the graph: route from START to the chosen expert, then end the run
+agent_graph.add_node("general_practitioner", general_practitioner)
+agent_graph.add_node("radiology_specialist", radiology_specialist)
+agent_graph.add_node("web_search_expert", web_search_expert)
+agent_graph.add_conditional_edges(
+    START,
+    route_query,
+    {
+        "general_practitioner": "general_practitioner",
+        "radiology_specialist": "radiology_specialist",
+        "web_search_expert": "web_search_expert",
+    },
+)
+agent_graph.add_edge("general_practitioner", END)
+agent_graph.add_edge("radiology_specialist", END)
+agent_graph.add_edge("web_search_expert", END)
+
+# Compile graph
+app = agent_graph.compile(checkpointer=checkpointer)
+
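+# ------------------------------
+# 🔹 Gradio Callback
+# ------------------------------
+# Minimal sketch of the `chat_with_agent` handler that the Submit button below expects,
+# assuming the TypedDict state defined above and a single fixed MemorySaver thread id
+# ("gradio-session"); both names are illustrative.
+def chat_with_agent(user_query, image_path, pdf_path):
+    """Runs one query through the agent graph, refreshing the vector store if a PDF was uploaded."""
+    global vector_store
+    if pdf_path:
+        vector_store = update_vector_store(pdf_path) or vector_store
+    state = {"query": user_query or "", "response": "", "image_path": image_path, "expert": ""}
+    config = {"configurable": {"thread_id": "gradio-session"}}
+    try:
+        result = app.invoke(state, config=config)
+        return result.get("response", "No response generated.")
+    except Exception as e:
+        logger.error(f"Agent error: {str(e)}")
+        return "Error processing your request."
+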
+# ------------------------------
+# 🔹 Gradio Interface
+# ------------------------------
+with gr.Blocks(title="Llama3-Med Multi-Agent AI") as demo:
+    gr.Markdown("# 🏥 AI Medical Assistant")
+
+    with gr.Row():
+        user_input = gr.Textbox(label="Your Question")
+        image_file = gr.Image(label="Upload Medical Image (Optional)", type="filepath")
+        pdf_file = gr.File(label="Upload PDF (Optional)", file_types=[".pdf"], type="filepath")
+    submit_btn = gr.Button("Submit")
+    output_text = gr.Textbox(label="Assistant's Response", interactive=False)
+
+    submit_btn.click(fn=chat_with_agent, inputs=[user_input, image_file, pdf_file], outputs=output_text)
+
+if __name__ == "__main__":
+    demo.launch()