SyedHasanCronosPMC committed
Commit 118c5fc · verified · parent: f3f7f32

Update app.py

Files changed (1)
  1. app.py +87 -38
app.py CHANGED
@@ -3,67 +3,102 @@ import os
 import matplotlib.pyplot as plt
 import pandas as pd
 from langgraph.graph import StateGraph
-from langgraph.prebuilt import create_agent_executor
-from langchain_core.messages import HumanMessage
+from langgraph_core.state import MessagesState
+from langchain_core.messages import HumanMessage, AIMessage
 from langchain_anthropic import ChatAnthropic
+import warnings
+warnings.filterwarnings("ignore")
 
-# Fallback Command class (since langgraph.types is not available in v0.0.41)
+# Define a Command class for langgraph 0.0.41
 class Command:
     def __init__(self, update=None, next=None, goto=None):
         self.update = update or {}
         self.next = next
         self.goto = goto
 
-# Set the API key from Hugging Face secrets
-os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY")
+# Set API key (ensure you add this as a secret in HF Spaces)
+api_key = os.getenv("ANTHROPIC_API_KEY")
+if not api_key:
+    raise ValueError("ANTHROPIC_API_KEY environment variable not set")
 
-# Claude 3.5 Sonnet model - remove any default parameters that might be causing issues
-llm = ChatAnthropic(
-    model="claude-3-5-sonnet-20240229",
-    # No additional parameters that might cause conflicts
-)
+# Load Claude 3.5 Sonnet model
+# Using a direct approach to avoid proxies issue
+try:
+    # Explicitly create with minimal parameters
+    llm = ChatAnthropic(api_key=api_key, model="claude-3-5-sonnet-20240229")
+except Exception as e:
+    print(f"Error initializing ChatAnthropic: {e}")
+    # Fallback initialization if needed
+    import anthropic
+    client = anthropic.Anthropic(api_key=api_key)
+    llm = ChatAnthropic(client=client, model="claude-3-5-sonnet-20240229")
 
-# Utility to build system prompts
+# System prompt constructor
 def make_system_prompt(suffix: str) -> str:
     return (
-        "You are a helpful AI assistant collaborating with others."
-        " Use your tools to assist. If you can't complete a task, leave it to another agent."
-        " Prefix final output with 'FINAL ANSWER' to signal completion.\n" + suffix
+        "You are a helpful AI assistant, collaborating with other assistants. "
+        "Use the provided tools to progress towards answering the question. "
+        "If you are unable to fully answer, that's OK—another assistant with different tools "
+        "will help where you left off. Execute what you can to make progress. "
+        "If you or any of the other assistants have the final answer or deliverable, "
+        "prefix your response with FINAL ANSWER so the team knows to stop.\n"
+        f"{suffix}"
     )
 
-# Research agent node
+# Research phase
 def research_node(state):
+    # Create a custom research agent using langgraph 0.0.41 compatible approach
+    from langgraph.prebuilt import create_agent_executor
     agent = create_agent_executor(
         llm,
         tools=[],
-        system_message=make_system_prompt("You can only perform research.")
+        system_message=make_system_prompt("You can only do research.")
     )
+
+    # Process the current state
     result = agent.invoke(state)
-    last_msg = result["messages"][-1]
+
+    # Check if we have a final answer
+    last_message = result["messages"][-1]
+    content = last_message.content if hasattr(last_message, "content") else last_message
+
     # Determine next step
-    goto = "chart_generator" if "FINAL ANSWER" not in last_msg.content else "__end__"
-    result["messages"][-1] = HumanMessage(
-        content=last_msg.content,
-        name="researcher"
-    )
+    goto = "chart_generator" if "FINAL ANSWER" not in content else "__end__"
+
+    # Create an AIMessage with the researcher name
+    if not isinstance(last_message, dict):
+        result["messages"][-1] = AIMessage(content=content, name="researcher")
+    else:
+        result["messages"][-1]["name"] = "researcher"
+
     return Command(update={"messages": result["messages"]}, goto=goto)
 
-# Chart generation agent node
+# Chart generation phase
 def chart_node(state):
+    # Create a custom chart generator agent
+    from langgraph.prebuilt import create_agent_executor
     agent = create_agent_executor(
         llm,
         tools=[],
         system_message=make_system_prompt("You can only generate charts.")
     )
+
+    # Process the current state
     result = agent.invoke(state)
-    result["messages"][-1] = HumanMessage(
-        content=result["messages"][-1].content,
-        name="chart_generator"
-    )
+
+    # Add the chart_generator name
+    last_message = result["messages"][-1]
+    content = last_message.content if hasattr(last_message, "content") else last_message
+
+    if not isinstance(last_message, dict):
+        result["messages"][-1] = AIMessage(content=content, name="chart_generator")
+    else:
+        result["messages"][-1]["name"] = "chart_generator"
+
     return Command(update={"messages": result["messages"]}, goto="__end__")
 
-# LangGraph state setup
-workflow = StateGraph(dict)
+# Build LangGraph
+workflow = StateGraph(dict)  # Using dict for state in langgraph 0.0.41
 workflow.add_node("researcher", research_node)
 workflow.add_node("chart_generator", chart_node)
 workflow.set_entry_point("researcher")
@@ -72,17 +107,25 @@ workflow.add_edge("researcher", "chart_generator")
 graph = workflow.compile()
 
 # LangGraph runner
-def run_langgraph(input_text):
+def run_langgraph(user_input):
     try:
-        events = graph.stream({"messages": [HumanMessage(content=input_text)]})
-        output = list(events)
-        final_response = output[-1]["messages"][-1].content
-        if "FINAL ANSWER" in final_response:
-            # Dummy chart generation
+        # Create a human message
+        human_message = HumanMessage(content=user_input)
+
+        # Stream the events
+        events = graph.stream({"messages": [human_message]})
+        outputs = list(events)
+
+        # Get the final message
+        final_message = outputs[-1]["messages"][-1]
+        final_content = final_message.content if hasattr(final_message, "content") else final_message
+
+        if isinstance(final_content, str) and "FINAL ANSWER" in final_content:
+            # Simulated chart (you can later parse dynamic values if needed)
             years = [2020, 2021, 2022, 2023, 2024]
             gdp = [21.4, 22.0, 23.1, 24.8, 26.2]
             plt.figure()
-            plt.plot(years, gdp, marker="o")
+            plt.plot(years, gdp, marker='o')
            plt.title("USA GDP Over Last 5 Years")
            plt.xlabel("Year")
            plt.ylabel("GDP in Trillions USD")
@@ -91,11 +134,17 @@ def run_langgraph(input_text):
            plt.savefig("gdp_chart.png")
            return "Chart generated based on FINAL ANSWER.", "gdp_chart.png"
        else:
-            return final_response, None
+            if isinstance(final_content, str):
+                return final_content, None
+            else:
+                return str(final_content), None
    except Exception as e:
+        print(f"Error in run_langgraph: {e}")
+        import traceback
+        traceback.print_exc()
        return f"Error: {str(e)}", None
 
-# Gradio interface
+# Gradio UI
 def process_input(user_input):
     return run_langgraph(user_input)
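The visible portion of the diff ends at process_input, before the Gradio interface itself is defined. As a rough illustration only, here is a minimal sketch of how process_input might be exposed through Gradio; the gr.Interface call, component labels, and launch guard are assumptions for illustration, not lines from this commit.

import gradio as gr  # assumed to be imported elsewhere in app.py

# Hypothetical wiring: text in, (text, optional chart image path) out.
demo = gr.Interface(
    fn=process_input,
    inputs=gr.Textbox(label="Ask the agents"),
    outputs=[gr.Textbox(label="Response"), gr.Image(type="filepath", label="Chart")],
)

if __name__ == "__main__":
    demo.launch()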