Update app.py
app.py
CHANGED
@@ -1,13 +1,11 @@
 import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from langchain_community.llms import HuggingFacePipeline
 from langchain.agents import initialize_agent, Tool
-from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
-from langchain.llms import HuggingFacePipeline
 import json
 import subprocess
-import os
 import logging
 
 # Configure logging
@@ -24,7 +22,6 @@ hf_pipeline = pipeline(
     "text-generation",
     model=model,
     tokenizer=tokenizer,
-    device=0 if torch.cuda.is_available() else -1,
     max_length=500,
     temperature=0.7,
 )
@@ -110,64 +107,44 @@ def multi_agent_workflow(requirements: str) -> str:
     Returns:
         str: Generated app code and API details.
     """
-    global api_details
-
-    # Step 1: Task Decomposition
     try:
+        # Step 1: Task Decomposition
         task_decomposition = ui_designer_agent.run(
             f"Break down the following app requirements into smaller tasks: {requirements}"
         )
         tasks = json.loads(task_decomposition)["tasks"]
-    except Exception as e:
-        logger.error(f"Task decomposition failed: {str(e)}")
-        return f"Task decomposition failed: {str(e)}"
+        logger.info(f"Tasks: {tasks}")
 
-    # Step 2: Code Generation
-    try:
+        # Step 2: Code Generation
         ui_code = ui_designer_agent.run(f"Generate the UI code for: {tasks[0]}")
         backend_code = backend_developer_agent.run(f"Generate the backend code for: {tasks[1]}")
-    except Exception as e:
-        logger.error(f"Code generation failed: {str(e)}")
-        return f"Code generation failed: {str(e)}"
+        logger.info(f"UI Code: {ui_code}")
+        logger.info(f"Backend Code: {backend_code}")
 
-    # Step 3: Code Formatting
-    try:
+        # Step 3: Code Formatting
         formatted_ui_code = ui_designer_agent.run(f"Format the following code: {ui_code}")
         formatted_backend_code = backend_developer_agent.run(f"Format the following code: {backend_code}")
-    except Exception as e:
-        logger.error(f"Code formatting failed: {str(e)}")
-        return f"Code formatting failed: {str(e)}"
+        logger.info(f"Formatted UI Code: {formatted_ui_code}")
+        logger.info(f"Formatted Backend Code: {formatted_backend_code}")
 
-    # Step 4: Integration
-    try:
+        # Step 4: Integration
         combined_code = f"{formatted_ui_code}\n\n{formatted_backend_code}"
-    except Exception as e:
-        logger.error(f"Code integration failed: {str(e)}")
-        return f"Code integration failed: {str(e)}"
+        logger.info(f"Combined Code: {combined_code}")
 
-    # Step 5: Testing
-    try:
+        # Step 5: Testing
         test_results = qa_engineer_agent.run(f"Test the following app: {combined_code}")
-    except Exception as e:
-        logger.error(f"Testing failed: {str(e)}")
-        return f"Testing failed: {str(e)}"
+        logger.info(f"Test Results: {test_results}")
 
-    # Step 6: Deployment
-    try:
+        # Step 6: Deployment
         deployment_status = devops_engineer_agent.run(f"Deploy the following app: {combined_code}")
-    except Exception as e:
-        logger.error(f"Deployment failed: {str(e)}")
-        return f"Deployment failed: {str(e)}"
+        logger.info(f"Deployment Status: {deployment_status}")
 
-    # Step 7: API Documentation
-    try:
+        # Step 7: API Documentation
         api_details = backend_developer_agent.run(f"Generate API details for: {combined_code}")
-    except Exception as e:
-        logger.error(f"API documentation failed: {str(e)}")
-        return f"API documentation failed: {str(e)}"
+        logger.info(f"API Details: {api_details}")
 
-    # Return the results
-    return f"""
+        # Return the results
+        return f"""
 Generated App Code:
 {combined_code}
 
@@ -180,6 +157,9 @@ Deployment Status:
 API Details:
 {api_details}
 """
+    except Exception as e:
+        logger.error(f"Workflow failed: {str(e)}")
+        return f"Workflow failed: {str(e)}"
 
 # Gradio Interface
 def app_generator(requirements: str):