import gradio as gr
import os
from typing import Literal

import matplotlib.pyplot as plt
import pandas as pd
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import HumanMessage
from langgraph.graph import END, MessagesState, StateGraph
from langgraph.prebuilt import create_react_agent
from langgraph.types import Command

# Set the API key from Hugging Face secrets
os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY")

# Claude 3.5 Sonnet
llm = ChatAnthropic(model="claude-3-5-sonnet-latest")


# Create the shared system prompt for both agents
def make_system_prompt(suffix: str) -> str:
    return (
        "You are a helpful AI assistant, collaborating with other assistants."
        " Use the provided tools to progress towards answering the question."
        " If you are unable to fully answer, that's OK—another assistant with different tools"
        " will help where you left off. Execute what you can to make progress."
        " If you or any of the other assistants have the final answer or deliverable,"
        " prefix your response with FINAL ANSWER so the team knows to stop."
        f"\n{suffix}"
    )


# Workflow node: research
def research_node(state: MessagesState) -> Command[Literal["chart_generator", END]]:
    agent = create_react_agent(
        llm,
        tools=[],
        state_modifier=make_system_prompt("You can only do research."),
    )
    result = agent.invoke(state)
    # Hand off to the chart generator unless the researcher already produced the final answer
    goto = END if "FINAL ANSWER" in result["messages"][-1].content else "chart_generator"
    # Re-label the agent's last message so the next agent sees it as incoming input
    result["messages"][-1] = HumanMessage(
        content=result["messages"][-1].content, name="researcher"
    )
    return Command(update={"messages": result["messages"]}, goto=goto)


# Workflow node: chart generation
def chart_node(state: MessagesState) -> Command[Literal[END]]:
    agent = create_react_agent(
        llm,
        tools=[],
        state_modifier=make_system_prompt("You can only generate charts."),
    )
    result = agent.invoke(state)
    result["messages"][-1] = HumanMessage(
        content=result["messages"][-1].content, name="chart_generator"
    )
    return Command(update={"messages": result["messages"]}, goto=END)


# LangGraph setup: routing is handled by the Command returned from each node,
# so no static researcher -> chart_generator edge or explicit finish point is needed.
workflow = StateGraph(MessagesState)
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.set_entry_point("researcher")
graph = workflow.compile()


# LangGraph runner
def run_langgraph(input_text):
    try:
        # Stream full state snapshots so the last event holds the final message list
        events = graph.stream(
            {"messages": [("user", input_text)]}, stream_mode="values"
        )
        output = []
        for event in events:
            output.append(event)
        final_response = output[-1]["messages"][-1].content

        if "FINAL ANSWER" in final_response:
            # Simulated chart creation from dummy data
            years = [2020, 2021, 2022, 2023, 2024]
            gdp = [21.4, 22.0, 23.1, 24.8, 26.2]
            plt.figure()
            plt.plot(years, gdp, marker="o")
            plt.title("USA GDP Over Last 5 Years")
            plt.xlabel("Year")
            plt.ylabel("GDP in Trillions USD")
            plt.grid(True)
            plt.tight_layout()
            plt.savefig("gdp_chart.png")
            return "Chart generated based on FINAL ANSWER.", "gdp_chart.png"
        else:
            return final_response, None
    except Exception as e:
        return f"Error: {str(e)}", None


# Gradio interface
def process_input(user_input):
    return run_langgraph(user_input)


interface = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(label="Enter your research task"),
    outputs=[gr.Textbox(label="Output"), gr.Image(type="filepath", label="Chart")],
    title="LangGraph Research Automation",
    description="Enter a research prompt and view chart output when applicable.",
)

if __name__ == "__main__":
    interface.launch()
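
# A minimal sketch of how one might exercise the pipeline without the Gradio UI,
# assuming ANTHROPIC_API_KEY is set in the environment (the prompt string below is
# only an illustrative example, not part of the app):
#
#     text, chart_path = run_langgraph("Research USA GDP over the last 5 years and chart it.")
#     print(text)
#     if chart_path:
#         print(f"Chart saved to {chart_path}")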