import subprocess
from typing import List

import gradio as gr
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
# --- Constants ---
MODEL_NAME = "bigscience/bloom-1b7"
MAX_NEW_TOKENS = 1024
TEMPERATURE = 0.7
TOP_P = 0.95
REPETITION_PENALTY = 1.2
# --- Model & Tokenizer (cached so Streamlit reruns don't reload the weights) ---
@st.cache_resource
def load_model_and_tokenizer():
    return AutoModelForCausalLM.from_pretrained(MODEL_NAME), AutoTokenizer.from_pretrained(MODEL_NAME)

model, tokenizer = load_model_and_tokenizer()
# --- Agents ---
agents = {
    "WEB_DEV": {
        "description": "Expert in web development technologies and frameworks.",
        "skills": ["HTML", "CSS", "JavaScript", "React", "Vue.js", "Flask", "Django", "Node.js", "Express.js"],
        "system_prompt": "You are a web development expert. Your goal is to assist the user in building and deploying web applications. Provide code snippets, explanations, and guidance on best practices.",
    },
    "AI_SYSTEM_PROMPT": {
        "description": "Expert in designing and implementing AI systems.",
        "skills": ["Machine Learning", "Deep Learning", "Natural Language Processing", "Computer Vision", "Reinforcement Learning"],
        "system_prompt": "You are an AI system expert. Your goal is to assist the user in designing and implementing AI systems. Provide code snippets, explanations, and guidance on best practices.",
    },
    "PYTHON_CODE_DEV": {
        "description": "Expert in Python programming and development.",
        "skills": ["Python", "Data Structures", "Algorithms", "Object-Oriented Programming", "Functional Programming"],
        "system_prompt": "You are a Python code development expert. Your goal is to assist the user in writing and debugging Python code. Provide code snippets, explanations, and guidance on best practices.",
    },
    "CODE_REVIEW_ASSISTANT": {
        "description": "Expert in code review and quality assurance.",
        "skills": ["Code Style", "Best Practices", "Security", "Performance", "Maintainability"],
        "system_prompt": "You are a code review assistant. Your goal is to assist the user in reviewing code for quality and efficiency. Provide feedback on code style, best practices, security, performance, and maintainability.",
    },
    "CONTENT_WRITER_EDITOR": {
        "description": "Expert in content writing and editing.",
        "skills": ["Grammar", "Style", "Clarity", "Conciseness", "SEO"],
        "system_prompt": "You are a content writer and editor. Your goal is to assist the user in creating high-quality content. Provide suggestions on grammar, style, clarity, conciseness, and SEO.",
    },
    "QUESTION_GENERATOR": {
        "description": "Expert in generating questions for learning and assessment.",
        "skills": ["Question Types", "Cognitive Levels", "Assessment Design"],
        "system_prompt": "You are a question generator. Your goal is to assist the user in generating questions for learning and assessment. Provide questions that are relevant to the topic and aligned with the cognitive levels.",
    },
    "HUGGINGFACE_FILE_DEV": {
        "description": "Expert in developing Hugging Face files for machine learning models.",
        "skills": ["Transformers", "Datasets", "Model Training", "Model Deployment"],
        "system_prompt": "You are a Hugging Face file development expert. Your goal is to assist the user in creating and deploying Hugging Face files for machine learning models. Provide code snippets, explanations, and guidance on best practices.",
    },
}
# --- Session State ---
if "workspace_projects" not in st.session_state:
st.session_state.workspace_projects = {}
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
if "active_agent" not in st.session_state:
st.session_state.active_agent = None
if "selected_agents" not in st.session_state:
st.session_state.selected_agents = []
if "current_project" not in st.session_state:
st.session_state.current_project = None
if "current_agent" not in st.session_state:
st.session_state.current_agent = None
if "current_cluster" not in st.session_state:
st.session_state.current_cluster = None
if "hf_token" not in st.session_state:
st.session_state.hf_token = None
if "repo_name" not in st.session_state:
st.session_state.repo_name = None
if "selected_model" not in st.session_state:
st.session_state.selected_model = None
def add_code_to_workspace(project_name: str, code: str, file_name: str):
    if project_name in st.session_state.workspace_projects:
        st.session_state.workspace_projects[project_name]['files'].append({'file_name': file_name, 'code': code})
        return f"Added code to {file_name} in project {project_name}"
    else:
        return f"Project {project_name} does not exist"

def terminal_interface(command: str, project_name: str):
    if project_name in st.session_state.workspace_projects:
        # Note: runs the raw command string with shell=True and assumes a directory
        # named after the project exists in the working directory.
        result = subprocess.run(command, cwd=project_name, shell=True, capture_output=True, text=True)
        return result.stdout + result.stderr
    else:
        return f"Project {project_name} does not exist"

def chat_interface(message: str, selected_agents: List[str]):
    # Collect one response per selected agent, keyed by agent name.
    responses = {}
    for agent in selected_agents:
        responses[agent] = get_agent_response(message, agents[agent]['system_prompt'])
    return responses

def get_agent_response(message: str, system_prompt: str) -> str:
    # Generate a reply with the locally loaded model, conditioned on the agent's system prompt.
    prompt = f"{system_prompt}\n\nUser: {message}\nAssistant:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=MAX_NEW_TOKENS, temperature=TEMPERATURE,
                             top_p=TOP_P, repetition_penalty=REPETITION_PENALTY, do_sample=True)
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
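
# --- Display & build helpers ---
# These helpers are called by the UI below but are not defined anywhere in the code
# shown above; the minimal placeholder implementations that follow are assumptions
# about their intended behaviour, sketched only so the script runs end to end.
def display_agent_info(agent_name: str):
    # Show the active agent's description, skills, and system prompt in the sidebar.
    agent = agents[agent_name]
    st.sidebar.subheader(f"Active Agent: {agent_name}")
    st.sidebar.write(agent["description"])
    st.sidebar.write("Skills: " + ", ".join(agent["skills"]))
    st.sidebar.write(agent["system_prompt"])

def display_workspace_projects():
    # List every workspace project and the files it contains.
    st.subheader("Workspace Projects")
    for name, project in st.session_state.workspace_projects.items():
        with st.expander(name):
            for file in project["files"]:
                st.write(f"**{file['file_name']}**")
                st.code(file["code"], language="python")

def display_chat_history():
    # Render whatever has accumulated in the session's chat history.
    st.subheader("Chat History")
    for entry in st.session_state.chat_history:
        st.write(entry)

def run_autonomous_build(selected_agents: List[str], project_name: str):
    # Placeholder build loop: ask each selected agent for a next step and log the reply.
    for agent in selected_agents:
        response = get_agent_response(f"Continue building the project '{project_name}'.", agents[agent]["system_prompt"])
        st.session_state.chat_history.append({"agent": agent, "response": response})
        st.write(f"**{agent}:** {response}")
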
# --- Streamlit UI ---
st.title("DevToolKit: AI-Powered Development Environment")
# --- Project Management ---
st.header("Project Management")
project_name = st.text_input("Enter project name:")
if st.button("Create Project"):
if project_name not in st.session_state.workspace_projects:
st.session_state.workspace_projects[project_name] = {'files': []}
st.success(f"Created project: {project_name}")
else:
st.warning(f"Project {project_name} already exists")
# --- Code Addition ---
st.subheader("Add Code to Workspace")
code_to_add = st.text_area("Enter code to add to workspace:")
file_name = st.text_input("Enter file name (e.g. 'app.py'):")
if st.button("Add Code"):
add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
st.success(add_code_status)
# --- Terminal Interface ---
st.subheader("Terminal (Workspace Context)")
terminal_input = st.text_input("Enter a command within the workspace:")
if st.button("Run Command"):
terminal_output = terminal_interface(terminal_input, project_name)
st.code(terminal_output, language="bash")
# --- Chat Interface ---
st.subheader("Chat with AI Agents")
selected_agents = st.multiselect("Select AI agents", list(agents.keys()), key="agent_select")
st.session_state.selected_agents = selected_agents
agent_chat_input = st.text_area("Enter your message for the agents:", key="agent_input")
if st.button("Send to Agents", key="agent_send"):
agent_chat_response = chat_interface(agent_chat_input, selected_agents)
st.write(agent_chat_response)
# --- Agent Control ---
st.subheader("Agent Control")
for agent_name in agents:
    agent = agents[agent_name]
    with st.expander(f"{agent_name} ({agent['description']})"):
        if st.button(f"Activate {agent_name}", key=f"activate_{agent_name}"):
            st.session_state.active_agent = agent_name
            st.success(f"{agent_name} activated.")
        if st.button(f"Deactivate {agent_name}", key=f"deactivate_{agent_name}"):
            st.session_state.active_agent = None
            st.success(f"{agent_name} deactivated.")
# --- Automate Build Process ---
st.subheader("Automate Build Process")
if st.button("Automate"):
if st.session_state.selected_agents:
run_autonomous_build(st.session_state.selected_agents, project_name)
else:
st.warning("Please select at least one agent.")
# --- Display Information ---
st.sidebar.subheader("Current State")
st.sidebar.json(st.session_state)
if st.session_state.active_agent:
    display_agent_info(st.session_state.active_agent)

display_workspace_projects()
display_chat_history()
# --- Gradio Interface ---
additional_inputs = [
    gr.Dropdown(label="Agents", choices=list(agents.keys()), value=list(agents.keys())[0], interactive=True),
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=MAX_NEW_TOKENS, minimum=0, maximum=10240, step=64, interactive=True, info="The maximum number of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
]
examples = [
    ["Create a simple web application using Flask", "WEB_DEV"],
    ["Generate a Python script to perform a linear regression analysis", "PYTHON_CODE_DEV"],
    ["Create a Dockerfile for a Node.js application", "AI_SYSTEM_PROMPT"],
    # Add more examples as needed
]
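
# --- Gradio adapter ---
# gr.ChatInterface calls its fn as fn(message, history, *additional_inputs), so the
# two-argument chat_interface above cannot be passed to it directly. The wrapper below
# is a minimal, illustrative adapter (its name and behaviour are assumptions, not part
# of the original file): it routes the message to the selected agent and returns a
# single string reply. The slider values are ignored in this sketch; get_agent_response
# uses the module-level generation constants.
def gradio_chat_fn(message, history, agent, system_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
    # Fall back to the agent's stored system prompt when the textbox is left empty.
    prompt = system_prompt or agents[agent]["system_prompt"]
    return get_agent_response(message, prompt)
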
gr.ChatInterface(
    fn=gradio_chat_fn,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="DevToolKit AI Assistant",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=True)