# Hugging Face Spaces page header (scrape residue), kept as a comment so the
# file remains valid Python:
#   SyedHasanCronosPMC — "Update app.py" — commit 81a5656 (verified)
import gradio as gr
import os
import matplotlib.pyplot as plt
import pandas as pd
from langgraph.graph import StateGraph
from langchain_core.messages import HumanMessage, AIMessage
import warnings
warnings.filterwarnings("ignore")
# Minimal stand-in for the Command object found in newer langgraph releases;
# langgraph 0.0.41 does not ship one.
class Command:
    """Container for a state update plus optional routing hints.

    Attributes:
        update: mapping of state keys to new values (never None).
        next:   name of the next node to run, if any.
        goto:   alternative routing target, if any.
    """

    def __init__(self, update=None, next=None, goto=None):
        # Falsy `update` (None or {}) normalizes to a fresh empty dict so
        # callers can always treat it as a mapping.
        self.update = update if update else {}
        self.next = next
        self.goto = goto
# Read the Anthropic key from the environment (add it as a secret in HF
# Spaces).  The mock LLM below never actually calls the API, but failing
# fast here surfaces a misconfigured Space at startup.
api_key = os.getenv("ANTHROPIC_API_KEY")
if api_key is None or api_key == "":
    raise ValueError("ANTHROPIC_API_KEY environment variable not set")
# Offline stand-in for the chat model so the Space needs no HTTP client.
class MockLLM:
    """Deterministic mock chat model returning canned replies.

    Routes any prompt containing the word "research" to a research-style
    reply and everything else to a chart-style reply; unrecognized input
    shapes get a generic reply.  Every reply contains "FINAL ANSWER" so
    the workflow terminates.
    """

    _RESEARCH_REPLY = (
        "Based on my research, here's what I found about your topic. "
        "FINAL ANSWER: This topic shows trends that would be interesting "
        "to visualize in a chart."
    )
    _CHART_REPLY = (
        "I've analyzed the data and created a chart visualization. "
        "FINAL ANSWER: The chart shows an upward trend from 2020 to 2024."
    )
    _GENERIC_REPLY = (
        "I've processed your request. "
        "FINAL ANSWER: Here's a summary of what I found."
    )

    def __init__(self):
        self.model = "mock-model"

    def _route(self, text):
        # "research" anywhere in the prompt (case-insensitive) selects the
        # research reply; otherwise assume a chart request.
        if "research" in text.lower():
            return self._RESEARCH_REPLY
        return self._CHART_REPLY

    def invoke(self, inputs):
        """Return a canned reply for a messages dict, a raw string, or anything else."""
        if isinstance(inputs, dict) and "messages" in inputs:
            # Collect text from message objects (.content) or message dicts.
            texts = []
            for msg in inputs["messages"]:
                if hasattr(msg, "content"):
                    texts.append(msg.content)
                elif isinstance(msg, dict) and "content" in msg:
                    texts.append(msg["content"])
            return self._route("\n".join(texts))
        if isinstance(inputs, str):
            return self._route(inputs)
        return self._GENERIC_REPLY
# Create our mock LLM
llm = MockLLM()
# System prompt constructor
def make_system_prompt(suffix: str) -> str:
    """Build the shared multi-agent system prompt with *suffix* appended.

    The base prompt instructs every agent to cooperate and to prefix a
    finished deliverable with "FINAL ANSWER" so the team knows to stop.
    """
    base = (
        "You are a helpful AI assistant, collaborating with other assistants. "
        "Use the provided tools to progress towards answering the question. "
        "If you are unable to fully answer, that's OK—another assistant with different tools "
        "will help where you left off. Execute what you can to make progress. "
        "If you or any of the other assistants have the final answer or deliverable, "
        "prefix your response with FINAL ANSWER so the team knows to stop.\n"
    )
    return f"{base}{suffix}"
# Research phase
def research_node(state):
    """Run the research agent over *state* and tag its reply as "researcher".

    Returns an updated state dict:
        messages:          the agent's message list, last entry renamed
        has_final_answer:  True when the reply text contains "FINAL ANSWER"
    """
    # Local import: keeps module import cheap and mirrors chart_node.
    from langgraph.prebuilt import create_agent_executor

    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only do research.")
    )
    result = agent.invoke(state)

    last_message = result["messages"][-1]
    # BUG FIX: the original bound `content` to the whole dict for dict-style
    # messages, so `"FINAL ANSWER" in content` inspected dict *keys* rather
    # than the reply text.  Extract the text explicitly per representation.
    if hasattr(last_message, "content"):
        content = last_message.content
        result["messages"][-1] = AIMessage(content=content, name="researcher")
    elif isinstance(last_message, dict):
        content = last_message.get("content", "")
        last_message["name"] = "researcher"
    else:
        # Plain string (or other) message: wrap it like the attr case.
        content = last_message
        result["messages"][-1] = AIMessage(content=content, name="researcher")

    return {
        "messages": result["messages"],
        "has_final_answer": isinstance(content, str) and "FINAL ANSWER" in content,
    }
# Chart generation phase
def chart_node(state):
    """Run the chart-generator agent over *state* and tag its reply.

    Returns an updated state dict with the last message renamed to
    "chart_generator"; ``has_final_answer`` is always True because chart
    generation is the terminal work step.
    """
    # Local import: keeps module import cheap and mirrors research_node.
    from langgraph.prebuilt import create_agent_executor

    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only generate charts.")
    )
    result = agent.invoke(state)

    last_message = result["messages"][-1]
    # Same representation handling as research_node: dict-style messages
    # carry their text under the "content" key, not as an attribute.
    if hasattr(last_message, "content"):
        content = last_message.content
        result["messages"][-1] = AIMessage(content=content, name="chart_generator")
    elif isinstance(last_message, dict):
        last_message["name"] = "chart_generator"
    else:
        result["messages"][-1] = AIMessage(content=last_message, name="chart_generator")

    # Chart generation always ends the workflow.
    return {"messages": result["messages"], "has_final_answer": True}
# Define the router function for the graph
def router(state):
    """Decide the next node after research.

    Returns "end" once a final answer exists, otherwise hands off to the
    chart generator.  A missing flag counts as "no final answer yet".
    """
    if state.get("has_final_answer", False):
        return "end"
    return "chart_generator"
# Simplified end node that doesn't modify the state
def end_node(state):
    """Terminal graph node: pass *state* through untouched."""
    return state
# Build a proper LangGraph for v0.0.41.
# Flow: researcher -> (router) -> chart_generator -> end, or straight to end
# when the researcher already produced a FINAL ANSWER.
# NOTE(review): StateGraph is given a dict *instance* here; most langgraph
# versions expect a state schema (e.g. a TypedDict class) — confirm this is
# accepted by the pinned 0.0.41 release.
workflow = StateGraph({"messages": [], "has_final_answer": False})
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.add_node("end", end_node)
# Research always runs first.
workflow.set_entry_point("researcher")
# router() returns the name of the next node ("chart_generator" or "end");
# NOTE(review): some langgraph versions require an explicit path mapping as
# a third argument to add_conditional_edges — verify for 0.0.41.
workflow.add_conditional_edges("researcher", router)
workflow.add_edge("chart_generator", "end")
graph = workflow.compile()
# LangGraph runner
def run_langgraph(user_input):
    """Run the compiled workflow on *user_input*.

    Returns a ``(text, chart_path_or_None)`` tuple matching the two Gradio
    outputs.  When the final message contains "FINAL ANSWER", a simulated
    GDP chart is rendered and its file path is returned.
    """
    try:
        human_message = HumanMessage(content=user_input)
        events = graph.stream({"messages": [human_message], "has_final_answer": False})
        outputs = list(events)
        if not outputs:
            return "Error: No messages in the final state.", None
        final_state = outputs[-1]
        # graph.stream() may yield either the state itself or events keyed
        # by node name ({node: state_update}); unwrap the latter shape so
        # the "messages" lookup below works in both cases.
        if isinstance(final_state, dict) and "messages" not in final_state:
            for value in final_state.values():
                if isinstance(value, dict) and "messages" in value:
                    final_state = value
                    break
        final_messages = final_state.get("messages", [])
        if not final_messages:
            return "Error: No messages in the final state.", None
        final_message = final_messages[-1]
        final_content = final_message.content if hasattr(final_message, "content") else final_message
        if isinstance(final_content, str) and "FINAL ANSWER" in final_content:
            return "Chart generated based on FINAL ANSWER.", _render_gdp_chart()
        return (final_content, None) if isinstance(final_content, str) else (str(final_content), None)
    except Exception as e:
        print(f"Error in run_langgraph: {e}")
        import traceback
        traceback.print_exc()
        return f"Error: {str(e)}", None

def _render_gdp_chart():
    """Render the simulated GDP chart to gdp_chart.png and return that path."""
    # Simulated chart (you can later parse dynamic values if needed).
    years = [2020, 2021, 2022, 2023, 2024]
    gdp = [21.4, 22.0, 23.1, 24.8, 26.2]
    fig = plt.figure()
    try:
        plt.plot(years, gdp, marker='o')
        plt.title("USA GDP Over Last 5 Years")
        plt.xlabel("Year")
        plt.ylabel("GDP in Trillions USD")
        plt.grid(True)
        plt.tight_layout()
        plt.savefig("gdp_chart.png")
    finally:
        # BUG FIX: the original never closed the figure, leaking one figure
        # per request in this long-running Gradio process.
        plt.close(fig)
    return "gdp_chart.png"
# Gradio UI
def process_input(user_input):
    """Gradio callback: delegate straight to the LangGraph runner.

    Returns the ``(text, chart_path_or_None)`` pair run_langgraph produces.
    """
    return run_langgraph(user_input)
# Build the Gradio UI: one text input; a text output plus an optional chart.
task_box = gr.Textbox(label="Enter your research task")
result_outputs = [
    gr.Textbox(label="Output"),
    gr.Image(type="filepath", label="Chart"),
]
interface = gr.Interface(
    fn=process_input,
    inputs=task_box,
    outputs=result_outputs,
    title="LangGraph Research Automation",
    description="Enter a research prompt and view chart output when applicable.",
)

if __name__ == "__main__":
    interface.launch()