import gradio as gr
import os

from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import create_react_agent
from langgraph.types import Command
from langchain_core.messages import HumanMessage
from langchain_anthropic import ChatAnthropic
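# Runtime dependencies (for requirements.txt; versions left unpinned here):
#   gradio, langgraph, langchain-core, langchain-anthropic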
# The Anthropic API key is provided as a Hugging Face Space secret; ChatAnthropic reads
# ANTHROPIC_API_KEY from the environment directly, so fail fast if it is missing.
if not os.getenv("ANTHROPIC_API_KEY"):
    raise RuntimeError("ANTHROPIC_API_KEY environment variable is not set")
# Initialize the Claude 3.5 Sonnet LLM
llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
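# Optional tweak (not in the original code): sampling settings can be passed explicitly,
# e.g. ChatAnthropic(model="claude-3-5-sonnet-latest", temperature=0).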
# System prompt template function
def make_system_prompt(suffix: str) -> str:
    return (
        "You are a helpful AI assistant, collaborating with other assistants."
        " Use the provided tools to progress towards answering the question."
        " If you are unable to fully answer, that's OK, another assistant with different tools"
        " will help where you left off. Execute what you can to make progress."
        " If you or any of the other assistants have the final answer or deliverable,"
        " prefix your response with FINAL ANSWER so the team knows to stop."
        f"\n{suffix}"
    )
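# For example, make_system_prompt("You can only do research.") returns the shared
# collaboration instructions above followed by the line "You can only do research."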
# Research agent logic
def research_node(state: MessagesState) -> Command[str]:
    agent = create_react_agent(
        llm,
        tools=[],  # no research tools are wired up yet; the agent relies on the LLM alone
        # "state_modifier" is the keyword in earlier langgraph releases; newer ones name it "prompt"
        state_modifier=make_system_prompt("You can only do research."),
    )
    result = agent.invoke(state)
    # Stop when an agent declares FINAL ANSWER; otherwise hand off to the chart generator
    goto = END if "FINAL ANSWER" in result["messages"][-1].content else "chart_generator"
    # Relabel the last message so downstream agents see who produced it
    result["messages"][-1] = HumanMessage(
        content=result["messages"][-1].content, name="researcher"
    )
    return Command(update={"messages": result["messages"]}, goto=goto)
# Chart generator logic
def chart_node(state: MessagesState) -> Command[str]:
    agent = create_react_agent(
        llm,
        tools=[],  # no charting tools are wired up yet; the agent relies on the LLM alone
        state_modifier=make_system_prompt("You can only generate charts."),
    )
    result = agent.invoke(state)
    # Stop when an agent declares FINAL ANSWER; otherwise hand back to the researcher
    goto = END if "FINAL ANSWER" in result["messages"][-1].content else "researcher"
    result["messages"][-1] = HumanMessage(
        content=result["messages"][-1].content, name="chart_generator"
    )
    return Command(update={"messages": result["messages"]}, goto=goto)
# Build the LangGraph workflow. Routing between the two agents is handled by the
# Command(goto=...) each node returns, so only the entry edge is declared; static
# edges out of the nodes would keep routing past a FINAL ANSWER.
workflow = StateGraph(MessagesState)
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.add_edge(START, "researcher")
graph = workflow.compile()
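# Minimal sanity check of the compiled graph (illustrative, commented out so it does
# not run inside the Space); the question mirrors the UI description below:
# result = graph.invoke(
#     {"messages": [("user", "Get GDP data for the USA over the past 5 years and create a chart.")]},
#     {"recursion_limit": 150},
# )
# print(result["messages"][-1].content)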
# Function to execute the LangGraph flow and return the last message produced
def run_langgraph(user_input):
    events = graph.stream(
        {"messages": [("user", user_input)]},
        {"recursion_limit": 150},
    )
    last_message = None
    # With the default stream mode, each event maps a node name to that node's state update
    for event in events:
        for update in event.values():
            if isinstance(update, dict) and update.get("messages"):
                last_message = update["messages"][-1]
    return last_message.content if last_message else "No output generated"
# Gradio interface logic
def process_input(user_input):
    return run_langgraph(user_input)
# Build the Gradio app
interface = gr.Interface(
    fn=process_input,
    inputs="text",
    outputs="text",
    title="LangGraph Research Automation",
    description="Enter your research task (e.g., 'Get GDP data for the USA over the past 5 years and create a chart.')",
)
# Launch the Gradio app
if __name__ == "__main__":
    # Bind to all interfaces on port 7860, the port Hugging Face Spaces expects
    interface.launch(server_name="0.0.0.0", server_port=7860)
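# To run outside Spaces (assuming the file is saved as app.py and the dependencies are
# installed): `python app.py`, then open http://localhost:7860 in a browser.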