import gradio as gr
import os
import matplotlib.pyplot as plt
from typing import List, TypedDict
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_anthropic import ChatAnthropic
# The Anthropic API key must already be set in the environment (e.g. as a Space secret)
if not os.getenv("ANTHROPIC_API_KEY"):
    raise RuntimeError("ANTHROPIC_API_KEY is not set")
# Define the LLM (Claude 3.5 Sonnet)
llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
# System prompt modifier
def make_system_prompt(suffix: str) -> str:
    return (
        "You are a helpful AI assistant, collaborating with other assistants."
        " Use the provided tools to progress toward answering the question."
        " If you cannot fully answer, another assistant will continue where you left off."
        " If you or the team has a complete answer, prefix it with FINAL ANSWER.\n"
        f"{suffix}"
    )
# Shared graph state: the running message list plus the name of the next node
class AgentState(TypedDict):
    messages: List[BaseMessage]
    next: str

# Node 1: Research assistant logic
def research_node(state: AgentState) -> AgentState:
    messages = state.get("messages", [])
    prompt = make_system_prompt("You can only do research.")
    # NOTE: older langgraph releases name the `prompt` argument `state_modifier`
    agent = create_react_agent(llm, tools=[], prompt=prompt)
    result = agent.invoke({"messages": messages})
    response = result["messages"][-1]
    messages.append(HumanMessage(content=response.content, name="researcher"))
    next_node = "chart_generator" if "FINAL ANSWER" not in response.content else END
    return {"messages": messages, "next": next_node}
# Node 2: Chart assistant logic
def chart_node(state: AgentState) -> AgentState:
    messages = state.get("messages", [])
    prompt = make_system_prompt("You can only generate charts.")
    agent = create_react_agent(llm, tools=[], prompt=prompt)
    result = agent.invoke({"messages": messages})
    response = result["messages"][-1]
    messages.append(HumanMessage(content=response.content, name="chart_generator"))
    return {"messages": messages, "next": END}
# Define LangGraph flow
workflow = StateGraph(AgentState)
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.set_entry_point("researcher")
workflow.add_conditional_edges("researcher", lambda state: state["next"])
workflow.add_edge("chart_generator", END)
graph = workflow.compile()
# Function to run the graph and optionally return a chart
def run_langgraph(input_text):
    try:
        result = graph.invoke({"messages": [HumanMessage(content=input_text)], "next": ""})
        final_content = result["messages"][-1].content
        if "FINAL ANSWER" in final_content:
            # Example static chart
            years = [2020, 2021, 2022, 2023, 2024]
            gdp = [21.4, 22.0, 23.1, 24.8, 26.2]
            plt.figure()
            plt.plot(years, gdp, marker="o")
            plt.title("USA GDP Over Last 5 Years")
            plt.xlabel("Year")
            plt.ylabel("GDP in Trillions")
            plt.grid(True)
            plt.tight_layout()
            plt.savefig("gdp_chart.png")
            plt.close()
            return "Chart generated based on FINAL ANSWER", "gdp_chart.png"
        else:
            return final_content, None
    except Exception as e:
        return f"Error: {e}", None
# Gradio Interface
def process_input(user_input):
    return run_langgraph(user_input)

interface = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(label="Enter your research task"),
    outputs=[gr.Textbox(label="Output"), gr.Image(type="filepath", label="Chart")],
    title="LangGraph Research Automation",
    description="Enter a research prompt and view chart output when applicable.",
)

if __name__ == "__main__":
    interface.launch()