import gradio as gr
import os
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import create_react_agent
from langgraph.types import Command
from langchain_core.messages import HumanMessage
from langchain_anthropic import ChatAnthropic

# Anthropic API key is read from the environment (e.g. a Hugging Face Space secret).
# Fail early with a clear message instead of crashing later on a missing key.
if not os.getenv("ANTHROPIC_API_KEY"):
    raise RuntimeError("ANTHROPIC_API_KEY environment variable is not set")

# Initialize Claude 3.5 Sonnet LLM
llm = ChatAnthropic(model="claude-3-5-sonnet-latest")

# System prompt template function
def make_system_prompt(suffix: str) -> str:
    return (
        "You are a helpful AI assistant, collaborating with other assistants."
        " Use the provided tools to progress towards answering the question."
        " If you are unable to fully answer, that's OK, another assistant with different tools "
        " will help where you left off. Execute what you can to make progress."
        " If you or any of the other assistants have the final answer or deliverable,"
        " prefix your response with FINAL ANSWER so the team knows to stop."
        f"\n{suffix}"
    )

# Research agent: gathers information and hands off to the chart generator
# unless it has already produced the final answer.
def research_node(state: MessagesState) -> Command[str]:
    agent = create_react_agent(
        llm,
        tools=[],
        state_modifier=make_system_prompt("You can only do research.")
    )
    result = agent.invoke(state)
    # Stop if the agent declared FINAL ANSWER; otherwise route to the chart generator.
    goto = END if "FINAL ANSWER" in result["messages"][-1].content else "chart_generator"
    # Re-wrap the last message as a named HumanMessage so the next agent treats it as input.
    result["messages"][-1] = HumanMessage(
        content=result["messages"][-1].content, name="researcher"
    )
    return Command(update={"messages": result["messages"]}, goto=goto)

# Chart generator agent: turns the researcher's findings into a chart,
# or hands control back to the researcher if more information is needed.
def chart_node(state: MessagesState) -> Command[str]:
    agent = create_react_agent(
        llm,
        tools=[],
        state_modifier=make_system_prompt("You can only generate charts.")
    )
    result = agent.invoke(state)
    # Stop if the agent declared FINAL ANSWER; otherwise route back to the researcher.
    goto = END if "FINAL ANSWER" in result["messages"][-1].content else "researcher"
    result["messages"][-1] = HumanMessage(
        content=result["messages"][-1].content, name="chart_generator"
    )
    return Command(update={"messages": result["messages"]}, goto=goto)

# Build the LangGraph workflow.
# Routing between the two agents is handled by the Command each node returns,
# so only the entry edge is declared; static researcher -> chart_generator -> END
# edges would conflict with that dynamic routing.
workflow = StateGraph(MessagesState)
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.add_edge(START, "researcher")
graph = workflow.compile()
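
# Minimal usage sketch (assumes a valid ANTHROPIC_API_KEY in the environment):
#
#   state = graph.invoke(
#       {"messages": [("user", "Get GDP data for the USA over the past 5 years.")]},
#       {"recursion_limit": 150},
#   )
#   print(state["messages"][-1].content)
#
# graph.invoke returns the final MessagesState dict, whose "messages" list holds the
# full conversation between the researcher and chart_generator agents.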

# Execute the LangGraph flow for a single request and return the final message text
def run_langgraph(user_input):
    events = graph.stream(
        {"messages": [("user", user_input)]},
        {"recursion_limit": 150},
        stream_mode="values",  # yield the full state after each step, not per-node updates
    )
    final_state = None
    for event in events:
        final_state = event
    return final_state["messages"][-1].content if final_state else "No output generated"
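
# Hedged debugging sketch: `debug_run` is a new helper (not part of the original app)
# that streams with stream_mode="updates", where each event is keyed by the node that
# just ran. Useful for watching the researcher/chart_generator handoff step by step.
def debug_run(user_input):
    for event in graph.stream(
        {"messages": [("user", user_input)]},
        {"recursion_limit": 150},
        stream_mode="updates",
    ):
        for node_name, update in event.items():
            print(f"[{node_name}] produced {len(update['messages'])} message(s)")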

# Gradio interface logic
def process_input(user_input):
    return run_langgraph(user_input)

# Build the Gradio interface (launched in the __main__ block below)
interface = gr.Interface(
    fn=process_input,
    inputs="text",
    outputs="text",
    title="LangGraph Research Automation",
    description="Enter your research task (e.g., 'Get GDP data for the USA over the past 5 years and create a chart.')"
)

if __name__ == "__main__":
    interface.launch(server_name="0.0.0.0", server_port=7860)