Update rag_langgraph.py
rag_langgraph.py  CHANGED  (+10 -20)
@@ -17,6 +17,8 @@ import functools
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langgraph.graph import StateGraph, END
 
+LLM = "gpt-4o"
+
 class AgentState(TypedDict):
     messages: Annotated[Sequence[BaseMessage], operator.add]
     next: str
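AgentState declares messages with operator.add, so whatever a node returns under "messages" is appended to the running conversation rather than replacing it. The agent_node helper that functools.partial wraps further down is not part of the changed hunks; a minimal sketch of what such a node conventionally looks like in this supervisor pattern (the exact body in rag_langgraph.py may differ):

from langchain_core.messages import HumanMessage

# Hypothetical sketch: run the agent on the shared state and hand its output
# back as a message, which operator.add appends to AgentState.messages.
def agent_node(state, agent, name):
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}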
@@ -24,10 +26,7 @@ class AgentState(TypedDict):
 def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
     prompt = ChatPromptTemplate.from_messages(
         [
-            (
-                "system",
-                system_prompt
-            ),
+            ("system", system_prompt),
             MessagesPlaceholder(variable_name="messages"),
             MessagesPlaceholder(variable_name="agent_scratchpad"),
         ]
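The rest of create_agent falls outside the diff. Given the tools parameter and the agent_scratchpad placeholder, it presumably builds an OpenAI tools agent and wraps it in an executor; a sketch under that assumption (only the prompt lines above are confirmed by the diff):

from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI

def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    # Assumed continuation: bind the tools to the LLM and return an executor
    # that agent_node can call with the shared state.
    agent = create_openai_tools_agent(llm, tools, prompt)
    return AgentExecutor(agent=agent, tools=tools)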
@@ -46,7 +45,7 @@ def create_graph(topic, word_count):
     members = ["Blogger"]
 
     system_prompt = (
-        "You are a
+        "You are a manager tasked with managing a conversation between the"
         " following workers: {members}. Given the following user request,"
         " respond with the worker to act next. Each worker will perform a"
         " task and respond with their results and status. When finished,"
@@ -85,7 +84,7 @@ def create_graph(topic, word_count):
         ]
     ).partial(options=str(options), members=", ".join(members))
 
-    llm = ChatOpenAI(model=
+    llm = ChatOpenAI(model=LLM)
 
     supervisor_chain = (
         prompt
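The middle of supervisor_chain sits between this hunk and the next: the prompt feeds an LLM bound to a routing function, and JsonOutputFunctionsParser (visible in the next hunk's context) extracts the chosen worker. The routing schema itself is not in the diff; a conventional sketch, with route and next as assumed names:

    # Assumed routing setup, not shown in the diff: the Manager must answer
    # with one of the options (the workers plus FINISH), returned as "next".
    options = ["FINISH"] + members
    function_def = {
        "name": "route",
        "description": "Select the next role.",
        "parameters": {
            "type": "object",
            "properties": {"next": {"enum": options}},
            "required": ["next"],
        },
    }

    supervisor_chain = (
        prompt
        | llm.bind_functions(functions=[function_def], function_call="route")
        | JsonOutputFunctionsParser()
    )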
@@ -93,14 +92,14 @@ def create_graph(topic, word_count):
         | JsonOutputFunctionsParser()
     )
 
-
-
+    research_agent = create_agent(llm, [tavily_tool], f"Research content on topic {topic}, prioritizing research papers.")
+    research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")
 
-    blogger_agent = create_agent(llm,
+    blogger_agent = create_agent(llm, f"Write a {word_count}-word blog post on topic {topic}. Add a references section with research papers.")
     blogger_node = functools.partial(agent_node, agent=blogger_agent, name="Blogger")
 
     workflow = StateGraph(AgentState)
+    workflow.add_node("Researcher", research_node)
-
     workflow.add_node("Blogger", blogger_node)
     workflow.add_node("Manager", supervisor_chain)
 
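The edges that connect Researcher, Blogger, and Manager are outside the changed hunks. In the usual supervisor layout this file follows, workers report back to the Manager and the Manager's "next" value either routes to a worker or ends the run; a sketch of that wiring under those assumptions (the actual edges in rag_langgraph.py are not shown here):

    # Assumed wiring, based on the standard LangGraph supervisor pattern.
    for member in ["Researcher", "Blogger"]:
        workflow.add_edge(member, "Manager")   # workers always report back

    workflow.add_conditional_edges(
        "Manager",
        lambda state: state["next"],
        {"Researcher": "Researcher", "Blogger": "Blogger", "FINISH": END},
    )
    workflow.set_entry_point("Manager")

    return workflow.compile()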
@@ -119,16 +118,7 @@ def run_multi_agent(topic, word_count):
     graph = create_graph(topic, word_count)
     result = graph.invoke({
         "messages": [
-            HumanMessage(content=
+            HumanMessage(content=topic)
         ]
     })
-    print("###")
-    print(result)
-    print("###")
-    print(result['messages'])
-    print("###")
-    print(result['messages'][1])
-    print("###")
-    print(result['messages'][1].content)
-    print("###")
     return result['messages'][1].content
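After the change, run_multi_agent seeds the graph with the topic as a single HumanMessage, drops the debug prints, and returns the content of the second accumulated message (index 1, the first message a worker appended after the seed). An illustrative call, with made-up arguments, assuming the API keys required by ChatOpenAI and the Tavily tool are configured:

# Hypothetical usage; the topic and word count are examples only.
post = run_multi_agent("Retrieval-augmented generation with LangGraph", 500)
print(post)  # content of result['messages'][1]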