import os
import json

from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Force the pure-Python protobuf implementation to avoid C++ extension issues
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"

# Load keys from environment
groq_api_key = os.getenv("GROQ_API_KEY")
serper_api_key = os.getenv("SERPER_API_KEY")
hf_token = os.getenv("HUGGINGFACE_INFERENCE_TOKEN")

# ---- Imports ----
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool

# ---- Tools ----
# Note: @tool requires a docstring on each function; it becomes the tool
# description the LLM uses to decide when to call it.
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

@tool
def subtract(a: int, b: int) -> int:
    """Subtract b from a."""
    return a - b

@tool
def divide(a: int, b: int) -> float:
    """Divide a by b, raising on division by zero."""
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b

@tool
def modulus(a: int, b: int) -> int:
    """Return a modulo b."""
    return a % b

@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia and return up to 2 matching documents."""
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted = "\n\n---\n\n".join(
        f"\n{doc.page_content}\n" for doc in search_docs
    )
    return formatted

@tool
def web_search(query: str) -> str:
    """Search the web via Tavily and return up to 3 results."""
    # TavilySearchResults.invoke takes the query string as its single input
    # and returns a list of dicts with "url" and "content" keys.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted = "\n\n---\n\n".join(
        f"\n{doc['content']}\n" for doc in search_docs
    )
    return formatted

@tool
def arxiv_search(query: str) -> str:
    """Search arXiv and return excerpts from up to 3 papers."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted = "\n\n---\n\n".join(
        f"\n{doc.page_content[:1000]}\n" for doc in search_docs
    )
    return formatted

# ---- Embedding & Vector Store Setup ----
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

json_QA = []
with open("metadata.jsonl", "r") as jsonl_file:
    for line in jsonl_file:
        json_QA.append(json.loads(line))

documents = [
    Document(
        page_content=f"Question : {sample['Question']}\n\nFinal answer : {sample['Final answer']}",
        metadata={"source": sample["task_id"]},
    )
    for sample in json_QA
]

vector_store = Chroma.from_documents(
    documents=documents,
    embedding=embeddings,
    persist_directory="./chroma_db",
    collection_name="my_collection",
)
vector_store.persist()
print("Documents inserted:", vector_store._collection.count())

@tool
def similar_question_search(query: str) -> str:
    """Retrieve up to 3 previously answered questions similar to the query."""
    matched_docs = vector_store.similarity_search(query, k=3)
    formatted = "\n\n---\n\n".join(
        f"\n{doc.page_content[:1000]}\n" for doc in matched_docs
    )
    return formatted

# ---- System Prompt ----
system_prompt = """You are a helpful assistant tasked with answering questions using a set of tools.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings..."""

sys_msg = SystemMessage(content=system_prompt)

# ---- Tool List ----
tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arxiv_search,
    similar_question_search,
]

# ---- Graph Definition ----
def build_graph(provider: str = "groq"):
    if provider == "groq":
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0, api_key=groq_api_key)
    elif provider == "google":
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(repo_id="mosaicml/mpt-30b", temperature=0)
        )
    else:
        raise ValueError("Invalid provider: choose 'groq', 'google', or 'huggingface'.")

    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    def retriever(state: MessagesState):
        # Prepend the system prompt and, when a similar solved question is
        # found in the vector store, attach it as a one-shot example.
        similar = vector_store.similarity_search(state["messages"][0].content)
        if similar:
            example_msg = HumanMessage(
                content=f"Here is a similar question:\n\n{similar[0].page_content}"
            )
            return {"messages": [sys_msg] + state["messages"] + [example_msg]}
        return {"messages": [sys_msg] + state["messages"]}

    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")

    return builder.compile()
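
# ---- Example Usage ----
# A minimal smoke test, sketched as an illustration rather than part of the
# original pipeline: it assumes GROQ_API_KEY is set in .env and that
# metadata.jsonl sits next to this script; the question below is hypothetical.
if __name__ == "__main__":
    graph = build_graph(provider="groq")
    question = "What is 25 multiplied by 17?"
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    # The last message holds the assistant's reply, ending in "FINAL ANSWER: ...".
    print(result["messages"][-1].content)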