acecalisto3 committed (verified)
Commit 2db68c7 · 1 Parent(s): 54ce177

Update app.py

Files changed (1)
  1. app.py +37 -88
app.py CHANGED
@@ -85,100 +85,34 @@ if "repo_name" not in st.session_state:
     st.session_state.repo_name = None
 if "selected_model" not in st.session_state:
     st.session_state.selected_model = None
-if "selected_code_model" not in st.session_state:
-    st.session_state.selected_code_model = None
-if "selected_chat_model" not in st.session_state:
-    st.session_state.selected_chat_model = None
 
-# --- Functions ---
-def format_prompt(message: str, history: List[Tuple[str, str]], agent_prompt: str) -> str:
-    """Formats the prompt for the language model."""
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {agent_prompt}, {message} [/INST]"
-    return prompt
-
-def generate_response(prompt: str, agent_name: str) -> str:
-    """Generates a response from the language model."""
-    agent = agents[agent_name]
-    system_prompt = agent["system_prompt"]
-    generate_kwargs = dict(
-        temperature=TEMPERATURE,
-        max_new_tokens=MAX_NEW_TOKENS,
-        top_p=TOP_P,
-        repetition_penalty=REPETITION_PENALTY,
-        do_sample=True,
-    )
-    input_ids = tokenizer.encode(prompt, return_tensors="pt")
-    output = model.generate(input_ids, **generate_kwargs)
-    response = tokenizer.decode(output[0], skip_special_tokens=True)
-    return response
-
-def chat_interface(chat_input: str, agent_names: List[str]) -> str:
-    """Handles chat interactions with the selected agents."""
-    if agent_names:
-        responses = []
-        for agent_name in agent_names:
-            prompt = format_prompt(chat_input, st.session_state.chat_history, agents[agent_name]["system_prompt"])
-            response = generate_response(prompt, agent_name)
-            responses.append(f"{agent_name}: {response}")
-        return "\n".join(responses)
+def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
+    if project_name in st.session_state.workspace_projects:
+        project = st.session_state.workspace_projects[project_name]
+        project['files'].append({'file_name': file_name, 'code': code})
+        return f"Code added to project {project_name}"
     else:
-        return "Please select at least one agent."
+        return f"Project {project_name} does not exist."
 
 def terminal_interface(command: str, project_name: str) -> str:
-    """Executes a command within the specified project directory."""
     try:
-        result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_name)
-        return result.stdout if result.returncode == 0 else result.stderr
+        project = st.session_state.workspace_projects.get(project_name, {})
+        workspace_dir = os.path.join("workspace", project_name)
+        os.makedirs(workspace_dir, exist_ok=True)
+        result = subprocess.run(command, shell=True, cwd=workspace_dir, capture_output=True, text=True)
+        return result.stdout + result.stderr
     except Exception as e:
         return str(e)
 
-def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
-    """Adds code to a workspace project."""
-    project_path = os.path.join(os.getcwd(), project_name)
-    if not os.path.exists(project_path):
-        os.makedirs(project_path)
-    file_path = os.path.join(project_path, file_name)
-    with open(file_path, 'w') as file:
-        file.write(code)
-    if project_name not in st.session_state.workspace_projects:
-        st.session_state.workspace_projects[project_name] = {'files': []}
-    st.session_state.workspace_projects[project_name]['files'].append(file_name)
-    return f"Added {file_name} to {project_name}"
-
-def display_workspace_projects():
-    """Displays a table of workspace projects."""
-    table = Table(title="Workspace Projects")
-    table.add_column("Project Name", style="cyan", no_wrap=True)
-    table.add_column("Files", style="magenta")
-    for project_name, details in st.session_state.workspace_projects.items():
-        table.add_row(project_name, ", ".join(details['files']))
-    rprint(Panel(table, title="[bold blue]Workspace Projects[/bold blue]"))
-
-def display_chat_history():
-    """Displays the chat history in a formatted way."""
-    table = Table(title="Chat History")
-    table.add_column("User", style="cyan", no_wrap=True)
-    table.add_column("Agent", style="magenta")
-    for user_prompt, bot_response in st.session_state.chat_history:
-        table.add_row(user_prompt, bot_response)
-    rprint(Panel(table, title="[bold blue]Chat History[/bold blue]"))
-
-def display_agent_info(agent_name: str):
-    """Displays information about the selected agent."""
-    agent = agents[agent_name]
-    table = Table(title=f"{agent_name} - Agent Information")
-    table.add_column("Description", style="cyan", no_wrap=True)
-    table.add_column("Skills", style="magenta")
-    table.add_row(agent["description"], ", ".join(agent["skills"]))
-    rprint(Panel(table, title=f"[bold blue]{agent_name} - Agent Information[/bold blue]"))
+def chat_interface(message: str, selected_agents: List[str]) -> str:
+    responses = {}
+    for agent_name in selected_agents:
+        agent = agents[agent_name]
+        responses[agent_name] = agent['system_prompt'] + " " + message
+    return json.dumps(responses, indent=2)
 
-def run_autonomous_build(agent_names: List[str], project_name: str):
-    """Runs the autonomous build process."""
-    for agent_name in agent_names:
+def run_autonomous_build(selected_agents: List[str], project_name: str):
+    for agent_name in selected_agents:
         agent = agents[agent_name]
         chat_history = st.session_state.chat_history
         workspace_projects = st.session_state.workspace_projects
@@ -186,7 +120,22 @@ def run_autonomous_build(agent_names: List[str], project_name: str):
         rprint(Panel(summary, title="[bold blue]Current State[/bold blue]"))
         rprint(Panel(next_step, title="[bold blue]Next Step[/bold blue]"))
         # Implement logic for autonomous build based on the current state
-        # ...
+
+def display_agent_info(agent_name: str):
+    agent = agents[agent_name]
+    st.sidebar.subheader(f"Agent: {agent_name}")
+    st.sidebar.write(agent['description'])
+    st.sidebar.write("Skills: " + ", ".join(agent['skills']))
+    st.sidebar.write("System Prompt: " + agent['system_prompt'])
+
+def display_workspace_projects():
+    st.sidebar.subheader("Workspace Projects")
+    for project_name, details in st.session_state.workspace_projects.items():
+        st.sidebar.write(f"{project_name}: {details}")
+
+def display_chat_history():
+    st.sidebar.subheader("Chat History")
+    st.sidebar.json(st.session_state.chat_history)
 
 # --- Streamlit UI ---
 st.title("DevToolKit: AI-Powered Development Environment")
@@ -247,7 +196,7 @@ if st.button("Automate"):
 
 # --- Display Information ---
 st.sidebar.subheader("Current State")
-st.sidebar.json(st.session_state.current_state)
+st.sidebar.json(st.session_state, indent=2)
 if st.session_state.active_agent:
     display_agent_info(st.session_state.active_agent)
     display_workspace_projects()
@@ -258,7 +207,7 @@ additional_inputs = [
     gr.Dropdown(label="Agents", choices=[s for s in agents.keys()], value=list(agents.keys())[0], interactive=True),
     gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
     gr.Slider(label="Temperature", value=TEMPERATURE, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
-    gr.Slider(label="Max new tokens", value=MAX_NEW_TOKENS, minimum=0, maximum=1000*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
+    gr.Slider(label="Max new tokens", value=MAX_NEW_TOKENS, minimum=0, maximum=10240, step=64, interactive=True, info="The maximum numbers of new tokens"),
     gr.Slider(label="Top-p (nucleus sampling)", value=TOP_P, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
     gr.Slider(label="Repetition penalty", value=REPETITION_PENALTY, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
 ]
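
For quick review outside the app, below is a minimal, self-contained sketch of how the post-commit versions of add_code_to_workspace, terminal_interface, and chat_interface behave. It is not part of the commit and is lightly adapted: a plain dict stands in for st.session_state, and the agents entry ("WEB_DEV") is a hypothetical placeholder. It also makes the behavioral shift visible: add_code_to_workspace now keeps code in session state instead of writing files to disk, and chat_interface now returns the agent's system prompt plus the message rather than calling the removed generate_response model path.

# Sketch only (assumptions: plain-dict session state, hypothetical "WEB_DEV" agent).
import json
import os
import subprocess
from typing import Dict, List

# Stand-ins for the app's globals / Streamlit session state.
agents: Dict[str, Dict] = {
    "WEB_DEV": {"system_prompt": "You are a web development expert.",
                "description": "Builds web apps", "skills": ["HTML", "CSS"]},
}
session_state = {"workspace_projects": {"demo": {"files": []}}, "chat_history": []}

def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
    # Mirrors the new in-memory behaviour: code is appended to session state,
    # not written to disk as the removed version did.
    if project_name in session_state["workspace_projects"]:
        project = session_state["workspace_projects"][project_name]
        project["files"].append({"file_name": file_name, "code": code})
        return f"Code added to project {project_name}"
    return f"Project {project_name} does not exist."

def terminal_interface(command: str, project_name: str) -> str:
    # Mirrors the new behaviour: commands run inside workspace/<project_name>,
    # and stdout and stderr are concatenated instead of selected by return code.
    try:
        workspace_dir = os.path.join("workspace", project_name)
        os.makedirs(workspace_dir, exist_ok=True)
        result = subprocess.run(command, shell=True, cwd=workspace_dir,
                                capture_output=True, text=True)
        return result.stdout + result.stderr
    except Exception as e:
        return str(e)

def chat_interface(message: str, selected_agents: List[str]) -> str:
    # Mirrors the new stub: each response is system prompt + message, no model call.
    responses = {name: agents[name]["system_prompt"] + " " + message
                 for name in selected_agents}
    return json.dumps(responses, indent=2)

if __name__ == "__main__":
    print(add_code_to_workspace("demo", "print('hi')", "main.py"))
    print(terminal_interface("ls", "demo"))
    print(chat_interface("Build a landing page", ["WEB_DEV"]))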