Spaces:
Sleeping
Sleeping
File size: 4,600 Bytes
f0a9ca2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 |
from typing import TypedDict, Annotated, List
from typing_extensions import List, TypedDict
from dotenv import load_dotenv
import chainlit as cl
import operator
from langchain.prompts import ChatPromptTemplate
from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_cohere import CohereRerank
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.tools.arxiv.tool import ArxivQueryRun
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.documents import Document
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_qdrant import QdrantVectorStore
from langgraph.graph import START, StateGraph, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
load_dotenv()
path = "data/"
loader = DirectoryLoader(path, glob="*.html")
docs = loader.load()
tavily_tool = TavilySearchResults(max_results=5)
arxiv_tool = ArxivQueryRun()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=750, chunk_overlap=100)
split_documents = text_splitter.split_documents(docs)
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
client = QdrantClient(":memory:")
client.create_collection(
collection_name="ai_across_years",
vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
)
vector_store = QdrantVectorStore(
client=client,
collection_name="ai_across_years",
embedding=embeddings,
)
_ = vector_store.add_documents(documents=split_documents)
retriever = vector_store.as_retriever(search_kwargs={"k": 5})
def retrieve(state):
retrieved_docs = retriever.invoke(state["question"])
return {"context" : retrieved_docs}
RAG_PROMPT = """\
You are a helpful assistant who answers questions based on provided context. You must only use the provided context, and cannot use your own knowledge.
### Question
{question}
### Context
{context}
"""
rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
llm = ChatOpenAI(model="gpt-4o-mini")
def generate(state):
docs_content = "\n\n".join(doc.page_content for doc in state["context"])
messages = rag_prompt.format_messages(question=state["question"], context=docs_content)
response = llm.invoke(messages)
return {"response" : response.content}
from langgraph.graph import START, StateGraph
from typing_extensions import List, TypedDict
from langchain_core.documents import Document
class State(TypedDict):
question: str
context: List[Document]
response: str
graph_builder = StateGraph(State).add_sequence([retrieve, generate])
graph_builder.add_edge(START, "retrieve")
graph = graph_builder.compile()
@tool
def ai_rag_tool(question: str) -> str:
"""Useful for when you need to answer questions about artificial intelligence. Input should be a fully formed question."""
response = graph.invoke({"question" : question})
return {
"messages": [HumanMessage(content=response["response"])],
"context": response["context"]
}
tool_belt = [
tavily_tool,
arxiv_tool,
ai_rag_tool
]
model = ChatOpenAI(model="gpt-4o", temperature=0)
model = model.bind_tools(tool_belt)
class AgentState(TypedDict):
messages: Annotated[list, add_messages]
context: List[Document]
tool_node = ToolNode(tool_belt)
uncompiled_graph = StateGraph(AgentState)
def call_model(state):
messages = state["messages"]
response = model.invoke(messages)
return {
"messages": [response],
"context": state.get("context", [])
}
uncompiled_graph.add_node("agent", call_model)
uncompiled_graph.add_node("action", tool_node)
uncompiled_graph.set_entry_point("agent")
def should_continue(state):
last_message = state["messages"][-1]
if last_message.tool_calls:
return "action"
return END
uncompiled_graph.add_conditional_edges(
"agent",
should_continue
)
uncompiled_graph.add_edge("action", "agent")
compiled_graph = uncompiled_graph.compile()
@cl.on_chat_start
async def start():
cl.user_session.set("graph", compiled_graph)
@cl.on_message
async def handle(message: cl.Message):
graph = cl.user_session.get("graph")
state = {"messages" : [HumanMessage(content=message.content)]}
response = await graph.ainvoke(state)
await cl.Message(content=response["messages"][-1].content).send() |