# HF Spaces app. NOTE: the deployed Space was reporting "Runtime error";
# the fixes below address the likely causes (invalid model id, invalid
# finish point, wrong stream-output unwrapping).
import gradio as gr | |
import os | |
import matplotlib.pyplot as plt | |
import pandas as pd | |
from langgraph.graph import StateGraph | |
from langchain_core.messages import HumanMessage, AIMessage | |
import warnings | |
warnings.filterwarnings("ignore") | |
# Minimal stand-in for the Command object of newer langgraph releases,
# kept API-compatible with the langgraph 0.0.41 usage in this file.
class Command:
    """Container for a graph node's result: a state update plus routing hints."""

    def __init__(self, update=None, next=None, goto=None):
        # A falsy `update` (None or {}) normalizes to a fresh empty dict.
        self.update = {} if not update else update
        self.next = next
        self.goto = goto
# Set API key (ensure you add this as a secret in HF Spaces)
# Fail fast at import time so a misconfigured Space surfaces one clear
# error instead of failing later inside a request handler.
api_key = os.getenv("ANTHROPIC_API_KEY")
if not api_key:
    raise ValueError("ANTHROPIC_API_KEY environment variable not set")
# Create a custom LLM implementation to avoid the proxies issue
def create_llm():
    """Build a thin Anthropic-client wrapper exposing a LangChain-like invoke().

    Returns:
        CustomAnthropicLLM: object whose invoke() accepts either a
        {"messages": [...]} dict of LangChain messages or a bare string,
        and returns the model's text reply.
    """
    # Directly use the Anthropic client instead of LangChain's wrapper
    from anthropic import Anthropic

    # Create the base client without any proxies
    client = Anthropic(api_key=api_key)

    class CustomAnthropicLLM:
        """Mimics the minimal LangChain LLM interface (invoke) over the raw client."""

        def __init__(self, client, model):
            self.client = client
            self.model = model

        def _complete(self, formatted_messages):
            # Single place for the API call so both input shapes share it.
            response = self.client.messages.create(
                model=self.model,
                messages=formatted_messages,
                max_tokens=1024,
            )
            return response.content[0].text

        def invoke(self, inputs):
            """Accept {'messages': [...]} or a plain prompt string; return reply text."""
            if isinstance(inputs, dict) and "messages" in inputs:
                # Map LangChain message objects onto the Anthropic wire format.
                formatted = [
                    {
                        "role": "user" if isinstance(m, HumanMessage) else "assistant",
                        "content": m.content,
                    }
                    for m in inputs["messages"]
                ]
                return self._complete(formatted)
            if isinstance(inputs, str):
                return self._complete([{"role": "user", "content": inputs}])
            raise ValueError(f"Unsupported input format: {type(inputs)}")

    # BUG FIX: "claude-3-5-sonnet-20240229" is not a valid Anthropic model id —
    # Claude 3.5 Sonnet's snapshot date is 20240620 (20240229 belongs to
    # Claude 3 Sonnet). The invalid id would make every API call fail.
    return CustomAnthropicLLM(client, "claude-3-5-sonnet-20240620")
# Create our custom LLM (module-level singleton shared by both graph nodes)
llm = create_llm()
# System prompt constructor
def make_system_prompt(suffix: str) -> str:
    """Compose the shared multi-agent system prompt plus a role-specific suffix."""
    base = (
        "You are a helpful AI assistant, collaborating with other assistants. "
        "Use the provided tools to progress towards answering the question. "
        "If you are unable to fully answer, that's OK—another assistant with different tools "
        "will help where you left off. Execute what you can to make progress. "
        "If you or any of the other assistants have the final answer or deliverable, "
        "prefix your response with FINAL ANSWER so the team knows to stop.\n"
    )
    return base + suffix
# Research phase
def research_node(state):
    """Run the research agent over the current graph state and route onward.

    Returns a Command whose goto is "chart_generator" unless the agent already
    produced a message containing "FINAL ANSWER", in which case it is "__end__".
    """
    # Create a custom research agent using langgraph 0.0.41 compatible approach
    # NOTE(review): create_agent_executor's signature varies across langgraph
    # versions — confirm it accepts (llm, tools=..., system_message=...) in 0.0.41.
    from langgraph.prebuilt import create_agent_executor
    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only do research.")
    )
    # Process the current state
    result = agent.invoke(state)
    # Check if we have a final answer
    last_message = result["messages"][-1]
    # If last_message is a plain dict it has no .content attribute, so
    # `content` becomes the dict itself — the "FINAL ANSWER" membership test
    # below would then check dict KEYS, not the reply text.
    # NOTE(review): verify messages here are always Message objects.
    content = last_message.content if hasattr(last_message, "content") else last_message
    # Determine next step
    goto = "chart_generator" if "FINAL ANSWER" not in content else "__end__"
    # Create an AIMessage with the researcher name so downstream consumers can
    # attribute the message to this agent.
    if not isinstance(last_message, dict):
        result["messages"][-1] = AIMessage(content=content, name="researcher")
    else:
        result["messages"][-1]["name"] = "researcher"
    return Command(update={"messages": result["messages"]}, goto=goto)
# Chart generation phase
def chart_node(state):
    """Run the chart-generator agent on the state; always route to "__end__".

    Mirrors research_node, but tags the final message as "chart_generator".
    """
    # Create a custom chart generator agent
    # NOTE(review): same API caveat as research_node — confirm the
    # create_agent_executor signature for langgraph 0.0.41.
    from langgraph.prebuilt import create_agent_executor
    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only generate charts.")
    )
    # Process the current state
    result = agent.invoke(state)
    # Add the chart_generator name to the last message for attribution.
    last_message = result["messages"][-1]
    # As in research_node: a dict message has no .content, so `content`
    # would be the dict itself here.
    content = last_message.content if hasattr(last_message, "content") else last_message
    if not isinstance(last_message, dict):
        result["messages"][-1] = AIMessage(content=content, name="chart_generator")
    else:
        result["messages"][-1]["name"] = "chart_generator"
    return Command(update={"messages": result["messages"]}, goto="__end__")
# Build LangGraph: researcher -> chart_generator, finishing after the chart.
workflow = StateGraph(dict)  # Using dict for state in langgraph 0.0.41
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.set_entry_point("researcher")
# BUG FIX: set_finish_point expects the name of a node that was added to the
# graph; "__end__" is langgraph's reserved terminal marker, not a node, so the
# original call was invalid. The chart generator is the last real step.
workflow.set_finish_point("chart_generator")
workflow.add_edge("researcher", "chart_generator")
graph = workflow.compile()
# LangGraph runner
def run_langgraph(user_input):
    """Run the compiled graph on `user_input`.

    Returns:
        tuple[str, str | None]: (text result, path to saved chart PNG or None).
        On any failure, returns an error string and None so the UI never crashes.
    """
    try:
        # Create a human message
        human_message = HumanMessage(content=user_input)
        # Stream the events
        events = graph.stream({"messages": [human_message]})
        outputs = list(events)
        # BUG FIX: graph.stream() yields {node_name: state_update} mappings,
        # so the final state is nested one level down; the original
        # outputs[-1]["messages"] raised KeyError. Unwrap when "messages"
        # is not at the top level.
        final_state = outputs[-1]
        if isinstance(final_state, dict) and "messages" not in final_state:
            final_state = next(iter(final_state.values()))
        final_message = final_state["messages"][-1]
        final_content = final_message.content if hasattr(final_message, "content") else final_message
        if isinstance(final_content, str) and "FINAL ANSWER" in final_content:
            # Simulated chart (you can later parse dynamic values if needed)
            years = [2020, 2021, 2022, 2023, 2024]
            gdp = [21.4, 22.0, 23.1, 24.8, 26.2]
            plt.figure()
            plt.plot(years, gdp, marker='o')
            plt.title("USA GDP Over Last 5 Years")
            plt.xlabel("Year")
            plt.ylabel("GDP in Trillions USD")
            plt.grid(True)
            plt.tight_layout()
            plt.savefig("gdp_chart.png")
            # BUG FIX: close the figure so repeated requests in this long-lived
            # Gradio process don't accumulate open matplotlib figures (leak).
            plt.close()
            return "Chart generated based on FINAL ANSWER.", "gdp_chart.png"
        # No final answer: surface the last message's text as-is.
        if isinstance(final_content, str):
            return final_content, None
        return str(final_content), None
    except Exception as e:
        # Top-level boundary: log the full traceback, return a UI-safe string.
        print(f"Error in run_langgraph: {e}")
        import traceback
        traceback.print_exc()
        return f"Error: {str(e)}", None
# Gradio UI
def process_input(user_input):
    """Gradio callback: delegate the raw textbox value to the LangGraph runner."""
    return run_langgraph(user_input)
# Wire the Gradio interface: one text input, two outputs (text + optional
# chart image). type="filepath" matches run_langgraph returning a saved
# PNG path or None.
interface = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(label="Enter your research task"),
    outputs=[gr.Textbox(label="Output"), gr.Image(type="filepath", label="Chart")],
    title="LangGraph Research Automation",
    description="Enter a research prompt and view chart output when applicable."
)
# Launch only when executed as a script (HF Spaces runs this module directly).
if __name__ == "__main__":
    interface.launch()