nananie143 committed on
Commit
fefcc45
·
1 Parent(s): bc8d692

Fixed app generation workflow with proper async execution and error handling

Browse files
Files changed (1) hide show
  1. app.py +90 -108
app.py CHANGED
@@ -777,7 +777,7 @@ class EnhancedAIFlow(AIFlow):
777
  self.contexts: Dict[AgentRole, AgentContext] = {}
778
  self.global_context = {}
779
  self.requirements = ""
780
-
781
  def initialize_flow(self):
782
  """Initialize the AI Flow with agent relationships and dependencies."""
783
  # Create nodes for each agent role
@@ -884,7 +884,7 @@ Focus: setup, API docs, guides"""
884
  except Exception as e:
885
  logger.error(f"Failed to generate prompt for {role}: {str(e)}")
886
  raise
887
-
888
  async def execute_flow(self, requirements: str) -> str:
889
  """Execute the AI Flow and build the project."""
890
  try:
@@ -892,20 +892,17 @@ Focus: setup, API docs, guides"""
892
  self.requirements = requirements
893
  self.initialize_flow()
894
 
895
- # Extract app name
896
  app_name = requirements.split()[0].lower().replace(" ", "_")
897
 
898
  # Execute agents in parallel where possible
899
  paths = list(nx.all_simple_paths(self.flow_graph, AgentRole.ARCHITECT, AgentRole.DOCUMENTATION_WRITER))
900
- await self._execute_paths(paths)
901
 
902
  # Generate project structure and documentation
903
  project_structure = generate_project_structure(app_name, self.contexts[AgentRole.ARCHITECT].artifacts)
904
  documentation = generate_documentation(app_name, requirements, self.contexts[AgentRole.DOCUMENTATION_WRITER].artifacts)
905
 
906
- # Create downloadable output
907
- download_path = self.output_manager.create_download(app_name)
908
-
909
  return f"""
910
  # {app_name.title()} - Generated Application
911
 
@@ -917,9 +914,6 @@ Focus: setup, API docs, guides"""
917
  ## Documentation
918
  {documentation}
919
 
920
- ## Download
921
- To download your project, use this path: {download_path}
922
-
923
  ## Next Steps
924
  1. Review the generated architecture and components
925
  2. Set up the development environment
@@ -936,90 +930,102 @@ For any issues or questions, please refer to the documentation or create an issu
936
  finally:
937
  if torch.cuda.is_available():
938
  torch.cuda.empty_cache()
939
-
940
- async def _execute_paths(self, paths: List[List[AgentRole]]):
941
  """Execute all paths in the flow graph."""
942
  try:
 
 
943
  for path in paths:
944
  for role in path:
945
- if self.contexts[role].state != FlowState.COMPLETED:
946
- await self._execute_agent(role)
 
 
 
 
 
 
 
 
 
 
 
 
947
  except Exception as e:
948
  logger.error(f"Failed to execute paths: {str(e)}")
949
  raise
950
-
951
- async def _execute_agent(self, role: AgentRole):
952
  """Execute a single agent's tasks with enhanced context."""
953
  try:
954
- context = self.contexts[role]
955
- if context.state == FlowState.COMPLETED:
956
- return
957
-
958
- context.state = FlowState.RUNNING
959
  prompt = self._generate_prompt(role)
960
 
961
- # Execute agent task
962
  result = await self._execute_agent_task(role, prompt)
963
 
964
- # Update context with results
965
- context.artifacts.update(result)
966
- context.state = FlowState.COMPLETED
 
 
967
 
968
  except Exception as e:
969
- context.state = FlowState.FAILED
970
  logger.error(f"Failed to execute agent {role}: {str(e)}")
 
971
  raise
972
-
973
- async def _execute_agent_task(self, role: AgentRole, prompt: str) -> Dict[str, str]:
974
  """Execute a specific agent's task with the given prompt."""
975
  try:
976
- # Get the appropriate agent for the role
977
  agent = get_agent(role)
978
 
979
  # Execute the agent's task
980
  result = await asyncio.to_thread(agent.run, prompt)
981
 
982
  # Process and return the result
983
- return {"output": result}
984
 
985
  except Exception as e:
986
- logger.error(f"Failed to execute agent task for {role}: {str(e)}")
987
  raise
988
 
989
  # Update the multi_agent_workflow function to use AI Flows
990
  async def multi_agent_workflow(requirements: str) -> str:
991
- """
992
- Execute a multi-agent workflow using AI Flows to generate a complex app.
993
- Args:
994
- requirements (str): App requirements.
995
- Returns:
996
- str: Generated app code and API details.
997
- """
998
  try:
999
- # Initialize AI Flow
1000
  ai_flow = EnhancedAIFlow()
1001
 
1002
- # Execute the flow with requirements
1003
  result = await ai_flow.execute_flow(requirements)
1004
 
1005
- # Return the compiled results
1006
  return result
1007
  except Exception as e:
1008
- logger.error(f"Failed to execute multi-agent workflow: {str(e)}")
1009
  raise
1010
 
1011
  # Update the app_generator function to handle async execution
1012
  async def app_generator(requirements: str) -> Dict[str, str]:
1013
  """Generate an app based on the provided requirements using AI Flows."""
1014
  try:
1015
- # Execute the multi-agent workflow
1016
- result = await multi_agent_workflow(requirements)
 
 
 
 
 
 
 
1017
  return {
1018
  "output": result,
1019
- "download_path": None
1020
  }
1021
  except Exception as e:
1022
- logger.error(f"Failed to generate app: {str(e)}")
1023
  raise
1024
 
1025
  async def stream_output(requirements, progress=gr.Progress()):
@@ -1049,41 +1055,29 @@ async def stream_output(requirements, progress=gr.Progress()):
1049
 
1050
  try:
1051
  # Run the app generator with a timeout
1052
- result = await asyncio.wait_for(
1053
- app_generator(requirements),
1054
- timeout=60 # 60 second timeout
1055
- )
1056
-
1057
- if result:
1058
- # Create download file
1059
- try:
1060
- download_path = result.get("download_path")
1061
- if download_path:
1062
- stream_handler.update(" Generation completed successfully!", "Success")
1063
- yield result.get("output", ""), download_path, "\n".join(stream_handler.output), "Success"
1064
- else:
1065
- error_msg = " Failed to create download file"
1066
- stream_handler.update(error_msg, "Error")
1067
- yield result.get("output", ""), None, "\n".join(stream_handler.output), "Error"
1068
- except Exception as e:
1069
- error_msg = f" Error creating download: {str(e)}"
1070
- stream_handler.update(error_msg, "Error")
1071
- yield result.get("output", ""), None, "\n".join(stream_handler.output), "Error"
1072
- else:
1073
- error_msg = " Failed to generate app"
1074
- stream_handler.update(error_msg, "Error")
1075
- yield "", None, "\n".join(stream_handler.output), "Error"
1076
 
 
 
 
 
 
 
 
1077
  except asyncio.TimeoutError:
1078
- error_msg = " Generation timed out after 60 seconds"
1079
- stream_handler.update(error_msg, "Error")
1080
- yield "", None, "\n".join(stream_handler.output), "Error"
1081
 
1082
  except Exception as e:
1083
- error_msg = f" Error: {str(e)}"
1084
- logger.error(error_msg)
1085
- stream_handler.update(error_msg, "Error")
1086
- yield error_msg, None, "\n".join(stream_handler.output), "Error"
 
 
 
1087
 
1088
  class StreamHandler:
1089
  """Handles streaming output for the Gradio interface."""
@@ -1193,41 +1187,29 @@ with gr.Blocks(theme=gr.themes.Soft()) as ui:
1193
 
1194
  try:
1195
  # Run the app generator with a timeout
1196
- result = await asyncio.wait_for(
1197
- app_generator(requirements),
1198
- timeout=60 # 60 second timeout
1199
- )
1200
-
1201
- if result:
1202
- # Create download file
1203
- try:
1204
- download_path = result.get("download_path")
1205
- if download_path:
1206
- stream_handler.update(" Generation completed successfully!", "Success")
1207
- yield result.get("output", ""), download_path, "\n".join(stream_handler.output), "Success"
1208
- else:
1209
- error_msg = " Failed to create download file"
1210
- stream_handler.update(error_msg, "Error")
1211
- yield result.get("output", ""), None, "\n".join(stream_handler.output), "Error"
1212
- except Exception as e:
1213
- error_msg = f" Error creating download: {str(e)}"
1214
- stream_handler.update(error_msg, "Error")
1215
- yield result.get("output", ""), None, "\n".join(stream_handler.output), "Error"
1216
- else:
1217
- error_msg = " Failed to generate app"
1218
- stream_handler.update(error_msg, "Error")
1219
- yield "", None, "\n".join(stream_handler.output), "Error"
1220
 
 
 
 
 
 
 
 
1221
  except asyncio.TimeoutError:
1222
- error_msg = " Generation timed out after 60 seconds"
1223
- stream_handler.update(error_msg, "Error")
1224
- yield "", None, "\n".join(stream_handler.output), "Error"
1225
 
1226
  except Exception as e:
1227
- error_msg = f" Error: {str(e)}"
1228
- logger.error(error_msg)
1229
- stream_handler.update(error_msg, "Error")
1230
- yield error_msg, None, "\n".join(stream_handler.output), "Error"
 
 
 
1231
 
1232
  def cancel_generation():
1233
  """Cancel the current generation process."""
 
777
  self.contexts: Dict[AgentRole, AgentContext] = {}
778
  self.global_context = {}
779
  self.requirements = ""
780
+
781
  def initialize_flow(self):
782
  """Initialize the AI Flow with agent relationships and dependencies."""
783
  # Create nodes for each agent role
 
884
  except Exception as e:
885
  logger.error(f"Failed to generate prompt for {role}: {str(e)}")
886
  raise
887
+
888
  async def execute_flow(self, requirements: str) -> str:
889
  """Execute the AI Flow and build the project."""
890
  try:
 
892
  self.requirements = requirements
893
  self.initialize_flow()
894
 
895
+ # Extract app name from requirements
896
  app_name = requirements.split()[0].lower().replace(" ", "_")
897
 
898
  # Execute agents in parallel where possible
899
  paths = list(nx.all_simple_paths(self.flow_graph, AgentRole.ARCHITECT, AgentRole.DOCUMENTATION_WRITER))
900
+ results = await self._execute_paths(paths)
901
 
902
  # Generate project structure and documentation
903
  project_structure = generate_project_structure(app_name, self.contexts[AgentRole.ARCHITECT].artifacts)
904
  documentation = generate_documentation(app_name, requirements, self.contexts[AgentRole.DOCUMENTATION_WRITER].artifacts)
905
 
 
 
 
906
  return f"""
907
  # {app_name.title()} - Generated Application
908
 
 
914
  ## Documentation
915
  {documentation}
916
 
 
 
 
917
  ## Next Steps
918
  1. Review the generated architecture and components
919
  2. Set up the development environment
 
930
  finally:
931
  if torch.cuda.is_available():
932
  torch.cuda.empty_cache()
933
+
934
+ async def _execute_paths(self, paths: List[List[AgentRole]]) -> List[str]:
935
  """Execute all paths in the flow graph."""
936
  try:
937
+ # Execute paths in parallel
938
+ tasks = []
939
  for path in paths:
940
  for role in path:
941
+ if self.contexts[role].state == FlowState.PENDING:
942
+ tasks.append(self._execute_agent(role))
943
+ self.contexts[role].state = FlowState.RUNNING
944
+
945
+ # Wait for all tasks to complete
946
+ results = await asyncio.gather(*tasks, return_exceptions=True)
947
+
948
+ # Process results
949
+ for result in results:
950
+ if isinstance(result, Exception):
951
+ raise result
952
+
953
+ return results
954
+
955
  except Exception as e:
956
  logger.error(f"Failed to execute paths: {str(e)}")
957
  raise
958
+
959
+ async def _execute_agent(self, role: AgentRole) -> str:
960
  """Execute a single agent's tasks with enhanced context."""
961
  try:
962
+ # Generate prompt
 
 
 
 
963
  prompt = self._generate_prompt(role)
964
 
965
+ # Execute agent's task
966
  result = await self._execute_agent_task(role, prompt)
967
 
968
+ # Update context
969
+ self.contexts[role].state = FlowState.COMPLETED
970
+ self.contexts[role].artifacts["output"] = result
971
+
972
+ return result
973
 
974
  except Exception as e:
 
975
  logger.error(f"Failed to execute agent {role}: {str(e)}")
976
+ self.contexts[role].state = FlowState.FAILED
977
  raise
978
+
979
+ async def _execute_agent_task(self, role: AgentRole, prompt: str) -> str:
980
  """Execute a specific agent's task with the given prompt."""
981
  try:
982
+ # Get agent
983
  agent = get_agent(role)
984
 
985
  # Execute the agent's task
986
  result = await asyncio.to_thread(agent.run, prompt)
987
 
988
  # Process and return the result
989
+ return result
990
 
991
  except Exception as e:
992
+ logger.error(f"Agent task execution failed for {role}: {str(e)}")
993
  raise
994
 
995
  # Update the multi_agent_workflow function to use AI Flows
996
  async def multi_agent_workflow(requirements: str) -> str:
997
+ """Execute a multi-agent workflow using AI Flows to generate a complex app."""
 
 
 
 
 
 
998
  try:
999
+ # Create AI Flow instance
1000
  ai_flow = EnhancedAIFlow()
1001
 
1002
+ # Generate the app
1003
  result = await ai_flow.execute_flow(requirements)
1004
 
 
1005
  return result
1006
  except Exception as e:
1007
+ logger.error(f"Multi-agent workflow failed: {str(e)}")
1008
  raise
1009
 
1010
  # Update the app_generator function to handle async execution
1011
  async def app_generator(requirements: str) -> Dict[str, str]:
1012
  """Generate an app based on the provided requirements using AI Flows."""
1013
  try:
1014
+ # Create AI Flow instance
1015
+ ai_flow = EnhancedAIFlow()
1016
+
1017
+ # Generate the app
1018
+ result = await ai_flow.execute_flow(requirements)
1019
+
1020
+ # Create downloadable output
1021
+ download_path = ai_flow.output_manager.create_download("generated_app")
1022
+
1023
  return {
1024
  "output": result,
1025
+ "download_path": str(download_path) if download_path else None
1026
  }
1027
  except Exception as e:
1028
+ logger.error(f"App generation failed: {str(e)}")
1029
  raise
1030
 
1031
  async def stream_output(requirements, progress=gr.Progress()):
 
1055
 
1056
  try:
1057
  # Run the app generator with a timeout
1058
+ async with asyncio.timeout(60): # 60 second timeout
1059
+ result = await app_generator(requirements)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1060
 
1061
+ # Update output with result
1062
+ if result["output"]:
1063
+ stream_handler.update("\n" + result["output"], "Completed")
1064
+ yield result["output"], result["download_path"], "\n".join(stream_handler.output), "Completed"
1065
+ else:
1066
+ raise Exception("No output generated")
1067
+
1068
  except asyncio.TimeoutError:
1069
+ stream_handler.update("\nApp generation timed out after 60 seconds", "Failed")
1070
+ yield None, None, "\n".join(stream_handler.output), "Failed"
1071
+ raise
1072
 
1073
  except Exception as e:
1074
+ error_msg = f"\nError: {str(e)}"
1075
+ stream_handler.update(error_msg, "Failed")
1076
+ yield None, None, "\n".join(stream_handler.output), "Failed"
1077
+ raise
1078
+ finally:
1079
+ if torch.cuda.is_available():
1080
+ torch.cuda.empty_cache()
1081
 
1082
  class StreamHandler:
1083
  """Handles streaming output for the Gradio interface."""
 
1187
 
1188
  try:
1189
  # Run the app generator with a timeout
1190
+ async with asyncio.timeout(60): # 60 second timeout
1191
+ result = await app_generator(requirements)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1192
 
1193
+ # Update output with result
1194
+ if result["output"]:
1195
+ stream_handler.update("\n" + result["output"], "Completed")
1196
+ yield result["output"], result["download_path"], "\n".join(stream_handler.output), "Completed"
1197
+ else:
1198
+ raise Exception("No output generated")
1199
+
1200
  except asyncio.TimeoutError:
1201
+ stream_handler.update("\nApp generation timed out after 60 seconds", "Failed")
1202
+ yield None, None, "\n".join(stream_handler.output), "Failed"
1203
+ raise
1204
 
1205
  except Exception as e:
1206
+ error_msg = f"\nError: {str(e)}"
1207
+ stream_handler.update(error_msg, "Failed")
1208
+ yield None, None, "\n".join(stream_handler.output), "Failed"
1209
+ raise
1210
+ finally:
1211
+ if torch.cuda.is_available():
1212
+ torch.cuda.empty_cache()
1213
 
1214
  def cancel_generation():
1215
  """Cancel the current generation process."""