File size: 7,021 Bytes
de9f6ca
 
d1d9178
1effecd
10e4c2d
118c5fc
 
 
de9f6ca
118c5fc
038f4ad
 
 
 
 
 
118c5fc
 
 
 
de9f6ca
eae3210
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
038f4ad
118c5fc
038f4ad
de9f6ca
118c5fc
 
 
 
 
 
 
de9f6ca
 
118c5fc
038f4ad
118c5fc
 
038f4ad
 
 
118c5fc
038f4ad
118c5fc
 
038f4ad
118c5fc
 
 
 
 
038f4ad
118c5fc
 
 
 
 
 
 
 
038f4ad
10e4c2d
118c5fc
038f4ad
118c5fc
 
038f4ad
 
 
 
 
118c5fc
 
038f4ad
118c5fc
 
 
 
 
 
 
 
 
 
038f4ad
10e4c2d
118c5fc
 
de9f6ca
 
d85a20b
10e4c2d
 
de9f6ca
 
038f4ad
118c5fc
d1d9178
118c5fc
 
 
 
 
 
 
 
 
 
 
 
 
d85a20b
 
 
118c5fc
d85a20b
 
10e4c2d
d85a20b
 
038f4ad
 
d85a20b
118c5fc
 
 
 
d1d9178
118c5fc
 
 
d85a20b
 
118c5fc
d85a20b
 
de9f6ca
 
d85a20b
 
 
de9f6ca
d1d9178
de9f6ca
 
 
f3f7f32
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
import gradio as gr
import os
import matplotlib.pyplot as plt
import pandas as pd
from langgraph.graph import StateGraph
from langchain_core.messages import HumanMessage, AIMessage
import warnings
warnings.filterwarnings("ignore")

# Shim for langgraph 0.0.41, which predates the built-in Command object:
# nodes return one of these to carry state updates plus routing info.
class Command:
    def __init__(self, update=None, next=None, goto=None):
        # Falsy/omitted update collapses to a fresh empty dict so callers
        # can always read .update safely.
        self.update = update if update else {}
        self.next = next
        self.goto = goto

# Set API key (ensure you add this as a secret in HF Spaces).
# Fail fast at import time so the Space shows a clear configuration error
# instead of failing later on the first API call.
api_key = os.getenv("ANTHROPIC_API_KEY")
if not api_key:
    raise ValueError("ANTHROPIC_API_KEY environment variable not set")

# Create a custom LLM implementation to avoid the proxies issue
def create_llm():
    """Build a thin Anthropic-backed LLM exposing a LangChain-style ``invoke``.

    Uses the raw ``anthropic`` client directly (rather than LangChain's
    ChatAnthropic wrapper) to sidestep the ``proxies`` kwarg incompatibility.
    Reads the module-level ``api_key``.

    Returns:
        An object with ``invoke(inputs)`` accepting either a plain prompt
        string or a dict with a ``"messages"`` list of HumanMessage/AIMessage
        objects, returning the model's text reply.
    """
    # Directly use the Anthropic client instead of LangChain's wrapper
    from anthropic import Anthropic

    # Create the base client without any proxies
    client = Anthropic(api_key=api_key)

    class CustomAnthropicLLM:
        """Minimal wrapper mimicking the LangChain ``invoke`` interface."""

        def __init__(self, client, model):
            self.client = client
            self.model = model

        def invoke(self, inputs):
            """Send ``inputs`` to the model and return the reply text.

            Raises:
                ValueError: if ``inputs`` is neither a str nor a messages dict.
            """
            if isinstance(inputs, dict) and "messages" in inputs:
                # Map LangChain message objects onto the Anthropic wire format:
                # HumanMessage -> "user", anything else -> "assistant".
                formatted_messages = [
                    {
                        "role": "user" if isinstance(msg, HumanMessage) else "assistant",
                        "content": msg.content,
                    }
                    for msg in inputs["messages"]
                ]
                response = self.client.messages.create(
                    model=self.model,
                    messages=formatted_messages,
                    max_tokens=1024,
                )
                return response.content[0].text

            if isinstance(inputs, str):
                response = self.client.messages.create(
                    model=self.model,
                    messages=[{"role": "user", "content": inputs}],
                    max_tokens=1024,
                )
                return response.content[0].text

            raise ValueError(f"Unsupported input format: {type(inputs)}")

    # BUG FIX: "claude-3-5-sonnet-20240229" is not a valid model id — the
    # 20240229 snapshot date belongs to Claude 3 Sonnet; the Claude 3.5
    # Sonnet snapshot is dated 2024-06-20. The old id would fail every call.
    return CustomAnthropicLLM(client, "claude-3-5-sonnet-20240620")

# Create our custom LLM (module-level singleton shared by all graph nodes)
llm = create_llm()

# System prompt constructor
def make_system_prompt(suffix: str) -> str:
    """Return the shared multi-agent collaboration prompt with *suffix* appended."""
    base = (
        "You are a helpful AI assistant, collaborating with other assistants. "
        "Use the provided tools to progress towards answering the question. "
        "If you are unable to fully answer, that's OK—another assistant with different tools "
        "will help where you left off. Execute what you can to make progress. "
        "If you or any of the other assistants have the final answer or deliverable, "
        "prefix your response with FINAL ANSWER so the team knows to stop.\n"
    )
    return base + suffix

# Research phase
def research_node(state):
    """Run the research agent over *state* and route to the next graph node.

    Routes to "chart_generator" unless the agent's reply already contains
    "FINAL ANSWER", in which case the graph ends.
    """
    # langgraph 0.0.41 compatible agent construction.
    from langgraph.prebuilt import create_agent_executor

    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only do research."),
    )

    # Process the current state.
    result = agent.invoke(state)

    messages = result["messages"]
    last = messages[-1]
    text = last.content if hasattr(last, "content") else last

    # Stop the graph once a final answer has been declared.
    if "FINAL ANSWER" in text:
        destination = "__end__"
    else:
        destination = "chart_generator"

    # Tag the last message with this node's name for downstream attribution.
    if isinstance(last, dict):
        last["name"] = "researcher"
    else:
        messages[-1] = AIMessage(content=text, name="researcher")

    return Command(update={"messages": messages}, goto=destination)

# Chart generation phase
def chart_node(state):
    """Run the chart-generator agent over *state*, then end the graph."""
    # langgraph 0.0.41 compatible agent construction.
    from langgraph.prebuilt import create_agent_executor

    agent = create_agent_executor(
        llm,
        tools=[],
        system_message=make_system_prompt("You can only generate charts."),
    )

    # Process the current state.
    result = agent.invoke(state)

    messages = result["messages"]
    last = messages[-1]
    text = last.content if hasattr(last, "content") else last

    # Tag the last message with this node's name for downstream attribution.
    if isinstance(last, dict):
        last["name"] = "chart_generator"
    else:
        messages[-1] = AIMessage(content=text, name="chart_generator")

    # Chart generation is the terminal step of this workflow.
    return Command(update={"messages": messages}, goto="__end__")

# Build LangGraph: researcher runs first, then the chart generator.
# NOTE(review): the nodes also return Command(goto=...) — the static edge and
# set_finish_point("__end__") below may be redundant or specific to how
# langgraph 0.0.41 resolves routing; confirm against that release's API.
workflow = StateGraph(dict)  # Using dict for state in langgraph 0.0.41
workflow.add_node("researcher", research_node)
workflow.add_node("chart_generator", chart_node)
workflow.set_entry_point("researcher")
workflow.set_finish_point("__end__")
workflow.add_edge("researcher", "chart_generator")
graph = workflow.compile()

# LangGraph runner
def run_langgraph(user_input):
    """Run the compiled graph on *user_input* and return (text, chart_path).

    Returns:
        tuple: (message text or error string, path to a saved chart image or
        None when no chart applies).
    """
    try:
        # Create a human message
        human_message = HumanMessage(content=user_input)

        # Stream the events and materialize them so we can inspect the last one.
        events = graph.stream({"messages": [human_message]})
        outputs = list(events)
        if not outputs:
            # ROBUSTNESS: an empty stream previously surfaced as a confusing
            # "Error: list index out of range" via the except below.
            return "No output produced by the graph.", None

        # Get the final message
        final_message = outputs[-1]["messages"][-1]
        final_content = final_message.content if hasattr(final_message, "content") else final_message

        if isinstance(final_content, str) and "FINAL ANSWER" in final_content:
            # Simulated chart (you can later parse dynamic values if needed)
            years = [2020, 2021, 2022, 2023, 2024]
            gdp = [21.4, 22.0, 23.1, 24.8, 26.2]
            fig = plt.figure()
            try:
                plt.plot(years, gdp, marker='o')
                plt.title("USA GDP Over Last 5 Years")
                plt.xlabel("Year")
                plt.ylabel("GDP in Trillions USD")
                plt.grid(True)
                plt.tight_layout()
                plt.savefig("gdp_chart.png")
            finally:
                # BUG FIX: close the figure so repeated Gradio requests do not
                # accumulate open matplotlib figures (memory leak + warning).
                plt.close(fig)
            return "Chart generated based on FINAL ANSWER.", "gdp_chart.png"

        # No final answer: return the content as plain text, no chart.
        if isinstance(final_content, str):
            return final_content, None
        return str(final_content), None
    except Exception as e:
        print(f"Error in run_langgraph: {e}")
        import traceback
        traceback.print_exc()
        return f"Error: {str(e)}", None

# Gradio UI
def process_input(user_input):
    """Gradio callback: forward the prompt to run_langgraph, returning its (text, chart_path) pair."""
    return run_langgraph(user_input)

# Gradio wiring: one text input; outputs are the result text plus an optional
# chart image (the Image component tolerates None when no chart was produced).
interface = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(label="Enter your research task"),
    outputs=[gr.Textbox(label="Output"), gr.Image(type="filepath", label="Chart")],
    title="LangGraph Research Automation",
    description="Enter a research prompt and view chart output when applicable."
)

if __name__ == "__main__":
    interface.launch()