# NOTE: Scrape artifact removed — HF Spaces page chrome (runtime status,
# file size, commit hashes, and a line-number gutter), not part of the code.
import gradio as gr
import os
import matplotlib.pyplot as plt
import pandas as pd
from langgraph.graph import StateGraph
from langchain_core.messages import HumanMessage, AIMessage
import warnings
# Silence noisy library deprecation warnings in the Space logs.
warnings.filterwarnings("ignore")
# Define a Command class for langgraph 0.0.41
class Command:
    """Lightweight stand-in for the Command object from newer langgraph
    releases: bundles a state update with optional routing targets."""

    def __init__(self, update=None, next=None, goto=None):
        self.goto = goto
        self.next = next
        # Falsy updates (None, {}) normalize to a fresh empty dict.
        self.update = update or {}
# Set API key (ensure you add this as a secret in HF Spaces).
# NOTE(review): the MockLLM below never contacts the Anthropic API, so this
# hard requirement may be stricter than necessary — confirm before removing.
api_key = os.environ.get("ANTHROPIC_API_KEY")
if not api_key:
    raise ValueError("ANTHROPIC_API_KEY environment variable not set")
# Mock LLM implementation that doesn't rely on any external HTTP clients
class MockLLM:
    """Offline stand-in for an LLM client.

    Returns deterministic canned replies (no network), choosing between a
    "research" reply and a "chart" reply based on whether the prompt text
    mentions "research". Every reply embeds the "FINAL ANSWER" sentinel so
    the graph's router can terminate.
    """

    # Canned replies, factored out so each literal appears exactly once.
    _RESEARCH_REPLY = (
        "Based on my research, here's what I found about your topic. "
        "FINAL ANSWER: This topic shows trends that would be interesting "
        "to visualize in a chart."
    )
    _CHART_REPLY = (
        "I've analyzed the data and created a chart visualization. "
        "FINAL ANSWER: The chart shows an upward trend from 2020 to 2024."
    )
    _DEFAULT_REPLY = (
        "I've processed your request. FINAL ANSWER: Here's a summary of "
        "what I found."
    )

    def __init__(self):
        # Advertised model name; purely cosmetic.
        self.model = "mock-model"

    @staticmethod
    def _message_text(messages):
        """Concatenate message text (objects with .content or dicts with a
        "content" key), one entry per line; other shapes are skipped."""
        parts = []
        for msg in messages:
            if hasattr(msg, "content"):
                parts.append(msg.content)
            elif isinstance(msg, dict) and "content" in msg:
                parts.append(msg["content"])
        return "".join(part + "\n" for part in parts)

    def _reply_for(self, text):
        """Pick the canned reply: research queries vs. chart generation."""
        if "research" in text.lower():
            return self._RESEARCH_REPLY
        return self._CHART_REPLY

    def invoke(self, inputs):
        """Return a canned string reply.

        Accepts a dict with a "messages" list, a plain string, or anything
        else (which yields the generic fallback reply).
        """
        if isinstance(inputs, dict) and "messages" in inputs:
            return self._reply_for(self._message_text(inputs["messages"]))
        if isinstance(inputs, str):
            return self._reply_for(inputs)
        return self._DEFAULT_REPLY
# Create our mock LLM
# Module-level singleton shared by both agent nodes below.
llm = MockLLM()
# System prompt constructor
def make_system_prompt(suffix: str) -> str:
    """Build the shared multi-agent system prompt with *suffix* appended.

    The base text instructs agents to cooperate and to prefix a finished
    deliverable with "FINAL ANSWER" so the team knows to stop.
    """
    base = (
        "You are a helpful AI assistant, collaborating with other assistants. "
        "Use the provided tools to progress towards answering the question. "
        "If you are unable to fully answer, that's OK—another assistant with different tools "
        "will help where you left off. Execute what you can to make progress. "
        "If you or any of the other assistants have the final answer or deliverable, "
        "prefix your response with FINAL ANSWER so the team knows to stop.\n"
    )
    return f"{base}{suffix}"
# Research phase
def research_node(state):
    """Run the research agent and tag its output as "researcher".

    Returns a state update dict: the message list (last entry renamed) plus
    a has_final_answer flag derived from the "FINAL ANSWER" sentinel, which
    the router uses to decide whether to continue to chart generation.

    NOTE(review): `create_agent_executor` in langgraph's prebuilt module is
    not documented to accept `llm`/`system_message=` like this — this call
    may be the source of the Space's runtime error; verify against the
    pinned langgraph version.
    """
    # Create a custom research agent using langgraph 0.0.41 compatible approach
    from langgraph.prebuilt import create_agent_executor
    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only do research.")
    )
    # Process the current state
    result = agent.invoke(state)
    # Check if we have a final answer
    last_message = result["messages"][-1]
    # Message objects carry .content; otherwise fall back to the raw value.
    content = last_message.content if hasattr(last_message, "content") else last_message
    # Create an AIMessage with the researcher name
    if not isinstance(last_message, dict):
        result["messages"][-1] = AIMessage(content=content, name="researcher")
    else:
        result["messages"][-1]["name"] = "researcher"
    # In langgraph 0.0.41 style, return the updated state
    return {"messages": result["messages"], "has_final_answer": "FINAL ANSWER" in content}
# Chart generation phase
def chart_node(state):
    """Run the chart-generator agent and tag its output as "chart_generator".

    Unconditionally sets has_final_answer=True, since this is the last
    productive node before the terminal "end" node.

    NOTE(review): same `create_agent_executor` signature concern as in
    research_node — verify against the installed langgraph version.
    """
    # Create a custom chart generator agent
    from langgraph.prebuilt import create_agent_executor
    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only generate charts.")
    )
    # Process the current state
    result = agent.invoke(state)
    # Add the chart_generator name
    last_message = result["messages"][-1]
    # Message objects carry .content; otherwise fall back to the raw value.
    content = last_message.content if hasattr(last_message, "content") else last_message
    if not isinstance(last_message, dict):
        result["messages"][-1] = AIMessage(content=content, name="chart_generator")
    else:
        result["messages"][-1]["name"] = "chart_generator"
    # Return the updated state
    return {"messages": result["messages"], "has_final_answer": True}
# Define the router function for the graph
def router(state):
    """Route after the researcher: stop once a FINAL ANSWER exists,
    otherwise continue to chart generation."""
    if state.get("has_final_answer", False):
        return "end"
    return "chart_generator"
# Simplified end node that doesn't modify the state
def end_node(state):
    """Terminal graph node: pass the state through untouched."""
    return state
# Build a proper LangGraph for v0.0.41
# NOTE(review): StateGraph conventionally takes a state *schema* (e.g. a
# TypedDict class), not an instance dict — confirm the pinned langgraph
# version accepts this initializer.
workflow = StateGraph({"messages": [], "has_final_answer": False})
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.add_node("end", end_node)
# Research always runs first; router picks chart_generator vs. end.
workflow.set_entry_point("researcher")
workflow.add_conditional_edges("researcher", router)
workflow.add_edge("chart_generator", "end")
graph = workflow.compile()
# LangGraph runner
def run_langgraph(user_input):
    """Run the compiled workflow for *user_input*.

    Returns a (text, chart_path_or_None) tuple matching the Gradio outputs.
    When the final content contains "FINAL ANSWER", a simulated GDP chart is
    rendered to gdp_chart.png and its path is returned; otherwise only text.
    Any exception is printed with its traceback and reported as an error
    string — the UI never sees a raised exception.

    NOTE(review): `graph.stream` typically yields {node_name: update} event
    dicts, so reading "messages" directly off the last event may always be
    empty — verify against the pinned langgraph version.
    """
    try:
        # Create a human message
        human_message = HumanMessage(content=user_input)
        # Run the workflow with proper initialization
        events = graph.stream({"messages": [human_message], "has_final_answer": False})
        outputs = list(events)
        # Guard the empty-stream edge explicitly instead of letting
        # outputs[-1] raise IndexError into the broad handler below.
        if not outputs:
            return "Error: No messages in the final state.", None
        # Get the final message
        final_state = outputs[-1]
        final_messages = final_state.get("messages", [])
        if not final_messages:
            return "Error: No messages in the final state.", None
        final_message = final_messages[-1]
        final_content = final_message.content if hasattr(final_message, "content") else final_message
        if isinstance(final_content, str) and "FINAL ANSWER" in final_content:
            # Simulated chart (you can later parse dynamic values if needed)
            years = [2020, 2021, 2022, 2023, 2024]
            gdp = [21.4, 22.0, 23.1, 24.8, 26.2]
            fig = plt.figure()
            try:
                plt.plot(years, gdp, marker='o')
                plt.title("USA GDP Over Last 5 Years")
                plt.xlabel("Year")
                plt.ylabel("GDP in Trillions USD")
                plt.grid(True)
                plt.tight_layout()
                plt.savefig("gdp_chart.png")
            finally:
                # Close the figure so repeated requests don't accumulate
                # matplotlib state in the long-running Gradio process.
                plt.close(fig)
            return "Chart generated based on FINAL ANSWER.", "gdp_chart.png"
        if isinstance(final_content, str):
            return final_content, None
        return str(final_content), None
    except Exception as e:
        print(f"Error in run_langgraph: {e}")
        import traceback
        traceback.print_exc()
        return f"Error: {str(e)}", None
# Gradio UI
def process_input(user_input):
    """Gradio callback: delegate directly to the LangGraph runner."""
    result = run_langgraph(user_input)
    return result
# Wire the Gradio UI: one text input mapped to a text output plus an
# optional chart image (run_langgraph returns the image as a file path).
interface = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(label="Enter your research task"),
    outputs=[gr.Textbox(label="Output"), gr.Image(type="filepath", label="Chart")],
    title="LangGraph Research Automation",
    description="Enter a research prompt and view chart output when applicable."
)
# Launch only when executed directly (HF Spaces runs this file as the entrypoint).
if __name__ == "__main__":
    interface.launch()