Nagesh Muralidhar committed
Commit 3f968e0 · 1 Parent(s): bd04115

midterm-submission

Files changed (3):
  1. server/agents.py +1 -1
  2. server/utils.py +52 -0
  3. server/workflow.py +21 -18
server/agents.py CHANGED
@@ -15,7 +15,7 @@ import numpy as np
 from langchain.schema import SystemMessage, HumanMessage, AIMessage
 from langchain.output_parsers import PydanticOutputParser
 from pydantic import BaseModel, Field
-from workflow import save_transcript
+from utils import save_transcript
 
 # Configure logging
 logging.basicConfig(
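
This import swap also breaks a circular dependency: workflow.py imports create_agents from agents.py, so having agents.py import save_transcript back from workflow.py made each module depend on the other. A minimal, self-contained sketch of why the old layout fails (the stub module bodies below are illustrative, not this repo's code):

# circular_import_demo.py -- reproduces the shape of the pre-commit failure.
import pathlib
import subprocess
import sys
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    root = pathlib.Path(tmp)
    # Stub modules mirroring the old layout: each imports from the other.
    (root / "workflow.py").write_text(
        "from agents import create_agents\n"
        "def save_transcript(script, query): pass\n"
    )
    (root / "agents.py").write_text(
        "from workflow import save_transcript\n"
        "def create_agents(): pass\n"
    )
    proc = subprocess.run(
        [sys.executable, "-c", "import workflow"],
        cwd=root, capture_output=True, text=True,
    )
    # Prints an ImportError about a "partially initialized module" --
    # the telltale sign of a circular import.
    print(proc.stderr.strip().splitlines()[-1])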
server/utils.py ADDED
@@ -0,0 +1,52 @@
+import os
+import json
+import uuid
+import logging
+
+# Configure logging
+logger = logging.getLogger(__name__)
+
+# Create transcripts directory if it doesn't exist
+TRANSCRIPTS_DIR = os.path.join(os.path.dirname(__file__), "transcripts")
+os.makedirs(TRANSCRIPTS_DIR, exist_ok=True)
+TRANSCRIPTS_FILE = os.path.join(TRANSCRIPTS_DIR, "podcasts.json")
+
+def save_transcript(podcast_script: str, user_query: str) -> None:
+    """Save podcast transcript to JSON file."""
+    # Create new transcript entry
+    transcript = {
+        "id": str(uuid.uuid4()),
+        "podcastScript": podcast_script,
+        "topic": user_query
+    }
+
+    try:
+        # Load existing transcripts
+        if os.path.exists(TRANSCRIPTS_FILE):
+            try:
+                with open(TRANSCRIPTS_FILE, 'r') as f:
+                    transcripts = json.load(f)
+                if not isinstance(transcripts, list):
+                    transcripts = []
+            except json.JSONDecodeError:
+                logger.warning("Error reading transcripts file, initializing empty list")
+                transcripts = []
+        else:
+            transcripts = []
+
+        # Append new transcript
+        transcripts.append(transcript)
+
+        # Save updated transcripts
+        with open(TRANSCRIPTS_FILE, 'w') as f:
+            json.dump(transcripts, f, indent=2)
+        logger.info("Successfully saved transcript")
+
+    except Exception as e:
+        logger.error(f"Error saving transcript: {str(e)}")
+        # Create directory if it doesn't exist
+        os.makedirs(os.path.dirname(TRANSCRIPTS_FILE), exist_ok=True)
+        # Try to save just this transcript
+        with open(TRANSCRIPTS_FILE, 'w') as f:
+            json.dump([transcript], f, indent=2)
+        logger.info("Saved single transcript after error")
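
For reference, a hypothetical call into the new helper (the script text and topic below are made up) and the shape of the transcripts/podcasts.json file it appends to:

# usage_sketch.py -- illustrative only; assumes it runs from the server/ directory.
from utils import save_transcript

save_transcript(
    podcast_script="Host: Welcome to today's debate...",
    user_query="Is remote work here to stay?",
)

# transcripts/podcasts.json then holds a JSON list of entries like:
# [
#   {
#     "id": "a3f1c2e4-...",            # uuid4 string
#     "podcastScript": "Host: Welcome to today's debate...",
#     "topic": "Is remote work here to stay?"
#   }
# ]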
server/workflow.py CHANGED
@@ -1,10 +1,13 @@
 from typing import Dict, Any, List, Annotated, TypedDict, Union, Optional
 from langgraph.graph import Graph, END
 from agents import create_agents
+from utils import save_transcript
 import os
 from dotenv import load_dotenv
-import json
-import uuid
+import logging
+
+# Configure logging
+logger = logging.getLogger(__name__)
 
 # Load environment variables
 load_dotenv()
@@ -73,11 +76,11 @@ def create_workflow(tavily_api_key: str):
     # Define the extractor node function
     async def run_extractor(state: AgentState) -> Dict[str, Any]:
         query = state["messages"][-1]["content"]
-        print(f"Extractor processing query: {query}")
+        logger.info(f"Extractor processing query: {query}")
 
         try:
             response = await agents["extractor"](query)
-            print(f"Extractor response: {response}")
+            logger.info(f"Extractor response: {response}")
 
             # Update state
             state["extractor_data"] = response
@@ -88,7 +91,7 @@
                 "skeptic": {"content": "Not started"},
                 "believer": {"content": "Not started"}
             })
-            print(f"Initial supervisor analysis: {supervisor_analysis}")
+            logger.info(f"Initial supervisor analysis: {supervisor_analysis}")
 
             state["supervisor_notes"].append(supervisor_analysis["content"])
             state["supervisor_chunks"].append(supervisor_analysis.get("chunks", {}))
@@ -97,17 +100,17 @@
             state["current_agent"] = "debate"
             return state
         except Exception as e:
-            print(f"Error in extractor: {str(e)}")
+            logger.error(f"Error in extractor: {str(e)}")
             raise Exception(f"Error in extractor: {str(e)}")
 
     # Define the debate node function
     async def run_debate(state: AgentState) -> Dict[str, Any]:
-        print(f"Debate turn {state['debate_turns']}")
+        logger.info(f"Debate turn {state['debate_turns']}")
 
         try:
             if state["debate_turns"] == 0:
                 # First turn: both agents respond to extractor
-                print("Starting first debate turn")
+                logger.info("Starting first debate turn")
 
                 # If we have context, use it to inform the agents' responses
                 context = state.get("context", {})
@@ -126,7 +129,7 @@
                     {"speaker": "skeptic", "content": skeptic_response["content"]},
                     {"speaker": "believer", "content": believer_response["content"]}
                 ])
-                print(f"First turn responses added: {state['debate_history'][-2:]}")
+                logger.info(f"First turn responses added: {state['debate_history'][-2:]}")
             else:
                 # Alternating responses based on agent type if specified
                 if state["agent_type"] in ["believer", "skeptic"]:
@@ -136,7 +139,7 @@
                     last_speaker = state["debate_history"][-1]["speaker"]
                     current_speaker = "believer" if last_speaker == "skeptic" else "skeptic"
 
-                print(f"Processing response for {current_speaker}")
+                logger.info(f"Processing response for {current_speaker}")
 
                 # Create context-aware input
                 context = state.get("context", {})
@@ -152,7 +155,7 @@
                     "speaker": current_speaker,
                     "content": response["content"]
                 })
-                print(f"Added response: {state['debate_history'][-1]}")
+                logger.info(f"Added response: {state['debate_history'][-1]}")
 
             # Add supervisor note and chunks
             supervisor_analysis = await agents["supervisor"]({
@@ -160,26 +163,26 @@
                 "skeptic": {"content": state["debate_history"][-1]["content"]},
                 "believer": {"content": state["debate_history"][-2]["content"] if len(state["debate_history"]) > 1 else "Not started"}
             })
-            print(f"Supervisor analysis: {supervisor_analysis}")
+            logger.info(f"Supervisor analysis: {supervisor_analysis}")
 
             state["supervisor_notes"].append(supervisor_analysis["content"])
             state["supervisor_chunks"].append(supervisor_analysis.get("chunks", {}))
 
             state["debate_turns"] += 1
-            print(f"Debate turn {state['debate_turns']} completed")
+            logger.info(f"Debate turn {state['debate_turns']} completed")
 
             # End the workflow after 2 debate turns
             if state["debate_turns"] >= 2:
                 state["current_agent"] = "podcast"
-                print("Moving to podcast production")
+                logger.info("Moving to podcast production")
 
             return state
         except Exception as e:
-            print(f"Error in debate: {str(e)}")
+            logger.error(f"Error in debate: {str(e)}")
             raise Exception(f"Error in debate: {str(e)}")
 
     async def run_podcast_producer(state: AgentState) -> Dict[str, Any]:
-        print("Starting podcast production")
+        logger.info("Starting podcast production")
 
         try:
             # Create podcast from debate
@@ -190,7 +193,7 @@
                 state["supervisor_chunks"],
                 {}  # Empty quadrant analysis since we removed storage manager
             )
-            print(f"Podcast production result: {podcast_result}")
+            logger.info(f"Podcast production result: {podcast_result}")
 
             # Save transcript to JSON file
             save_transcript(
@@ -205,7 +208,7 @@
             state["current_agent"] = END
             return state
         except Exception as e:
-            print(f"Error in podcast production: {str(e)}")
+            logger.error(f"Error in podcast production: {str(e)}")
             raise Exception(f"Error in podcast production: {str(e)}")
 
     # Add nodes to the graph
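
A note on the print-to-logger migration: logging.getLogger(__name__) emits nothing until a handler is configured somewhere in the process. Here that happens as a side effect of the logging.basicConfig(...) call in server/agents.py (visible in the first diff's context), which workflow.py imports. If workflow.py were ever run without importing agents, a one-time setup at startup would be needed; a minimal sketch (the level and format are assumptions, not this repo's settings):

import logging

# Hypothetical one-time root-logger setup; module loggers created via
# logging.getLogger(__name__) inherit this handler and level.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
)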