import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.agents import initialize_agent, Tool
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFacePipeline
import json
import subprocess
import os
import logging
# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
# Load the LLM and tokenizer
MODEL_NAME = "unit-mesh/autodev-coder-deepseek-6.7b-finetunes"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")
# Create a Hugging Face pipeline
hf_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    # Device placement is already handled by device_map="auto" above, so no explicit
    # device argument is passed here (supplying both can raise an error in transformers).
    max_new_tokens=500,  # cap generated tokens instead of total length, so long agent prompts are not cut off
    do_sample=True,      # required for temperature to take effect
    temperature=0.7,
)
# Wrap the pipeline in a LangChain LLM
llm = HuggingFacePipeline(pipeline=hf_pipeline)
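# The HuggingFacePipeline wrapper exposes the local transformers pipeline through
# LangChain's LLM interface, so the agents below can drive the model like any other
# LangChain-compatible LLM.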
# Define tools for the agents
tools = [
    Tool(
        name="Code Formatter",
        # Pipe the code through Black's stdin/stdout mode.
        func=lambda x: subprocess.run(["black", "-"], input=x.encode(), capture_output=True).stdout.decode(),
        description="Formats code using Black.",
    ),
    Tool(
        name="API Generator",
        # Placeholder implementation: returns a static JSON stub.
        func=lambda x: json.dumps({"endpoints": {"example": "POST - Example endpoint."}}),
        description="Generates API details from code.",
    ),
    Tool(
        name="Task Decomposer",
        # Placeholder implementation: returns a fixed task list as JSON.
        func=lambda x: json.dumps({"tasks": ["Design UI", "Develop Backend", "Test App", "Deploy App"]}),
        description="Breaks down app requirements into smaller tasks.",
    ),
]
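# Example (illustrative): the Code Formatter tool pipes code through Black's stdin mode,
# so a call such as
#   tools[0].func("x=1")
# should return the formatted string "x = 1\n", provided the `black` executable is
# available on PATH.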
# Define prompt templates
ui_designer_prompt = PromptTemplate(
    input_variables=["input"],
    template="You are a UI Designer. Your task is: {input}",
)
backend_developer_prompt = PromptTemplate(
    input_variables=["input"],
    template="You are a Backend Developer. Your task is: {input}",
)
qa_engineer_prompt = PromptTemplate(
    input_variables=["input"],
    template="You are a QA Engineer. Your task is: {input}",
)
devops_engineer_prompt = PromptTemplate(
    input_variables=["input"],
    template="You are a DevOps Engineer. Your task is: {input}",
)
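# Note: these role prompts are not wired into the zero-shot agents created below; each
# role is instead conveyed in the run() call. A minimal sketch of how they could be used
# directly, assuming the same langchain version as the imports above:
#
#   ui_designer_chain = LLMChain(llm=llm, prompt=ui_designer_prompt)
#   ui_designer_chain.run(input="Design a responsive login screen")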
# Initialize agents
def make_agent():
    """Create a zero-shot ReAct agent that shares the same tools and LLM."""
    return initialize_agent(
        tools=tools,
        llm=llm,
        agent="zero-shot-react-description",
        verbose=True,
    )

ui_designer_agent = make_agent()
backend_developer_agent = make_agent()
qa_engineer_agent = make_agent()
devops_engineer_agent = make_agent()
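# Each agent follows the zero-shot ReAct strategy: at every step the LLM picks one of
# the shared tools based solely on the tool descriptions above, observes the result,
# and continues until it produces a final answer.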
# Multi-Agent Workflow
def multi_agent_workflow(requirements: str) -> str:
    """
    Execute a multi-agent workflow to generate a complex app.

    Args:
        requirements (str): App requirements.

    Returns:
        str: Generated app code and API details.
    """
    # Step 1: Task Decomposition
    try:
        task_decomposition = ui_designer_agent.run(
            f"Break down the following app requirements into smaller tasks: {requirements}"
        )
        # The workflow assumes the decomposer returns JSON with at least two tasks
        # (UI first, backend second).
        tasks = json.loads(task_decomposition)["tasks"]
    except Exception as e:
        logger.error(f"Task decomposition failed: {str(e)}")
        return f"Task decomposition failed: {str(e)}"

    # Step 2: Code Generation
    try:
        ui_code = ui_designer_agent.run(f"Generate the UI code for: {tasks[0]}")
        backend_code = backend_developer_agent.run(f"Generate the backend code for: {tasks[1]}")
    except Exception as e:
        logger.error(f"Code generation failed: {str(e)}")
        return f"Code generation failed: {str(e)}"

    # Step 3: Code Formatting
    try:
        formatted_ui_code = ui_designer_agent.run(f"Format the following code: {ui_code}")
        formatted_backend_code = backend_developer_agent.run(f"Format the following code: {backend_code}")
    except Exception as e:
        logger.error(f"Code formatting failed: {str(e)}")
        return f"Code formatting failed: {str(e)}"

    # Step 4: Integration
    combined_code = f"{formatted_ui_code}\n\n{formatted_backend_code}"

    # Step 5: Testing
    try:
        test_results = qa_engineer_agent.run(f"Test the following app: {combined_code}")
    except Exception as e:
        logger.error(f"Testing failed: {str(e)}")
        return f"Testing failed: {str(e)}"

    # Step 6: Deployment
    try:
        deployment_status = devops_engineer_agent.run(f"Deploy the following app: {combined_code}")
    except Exception as e:
        logger.error(f"Deployment failed: {str(e)}")
        return f"Deployment failed: {str(e)}"

    # Step 7: API Documentation
    try:
        api_details = backend_developer_agent.run(f"Generate API details for: {combined_code}")
    except Exception as e:
        logger.error(f"API documentation failed: {str(e)}")
        return f"API documentation failed: {str(e)}"

    # Return the results
    return f"""
Generated App Code:
{combined_code}

Test Results:
{test_results}

Deployment Status:
{deployment_status}

API Details:
{api_details}
"""
# Gradio Interface
def app_generator(requirements: str):
    """
    Generate an app based on the provided requirements.

    Args:
        requirements (str): App requirements.

    Returns:
        str: Generated app code and API details.
    """
    return multi_agent_workflow(requirements)
# Gradio UI
with gr.Blocks() as ui:
    gr.Markdown("# Autonomous App Generator with LangChain Agents")
    with gr.Row():
        requirements_input = gr.Textbox(label="App Requirements", placeholder="Describe the app you want to build...")
        generate_button = gr.Button("Generate App")
    output = gr.Textbox(label="Generated App Code and API Details", lines=20)
    generate_button.click(app_generator, inputs=requirements_input, outputs=output)
# Run the Gradio app
if __name__ == "__main__":
ui.launch() |
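# Example (hypothetical) of driving the running app programmatically with gradio_client,
# assuming it is served on the default local address; the exact endpoint can be
# inspected with client.view_api():
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   print(client.predict("A todo app with user accounts", fn_index=0))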