acecalisto3 committed on
Commit
dcbeecc
·
verified ·
1 Parent(s): d903c55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -151
app.py CHANGED
@@ -1,11 +1,16 @@
1
- import subprocess
2
  import streamlit as st
 
 
3
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
4
  import black
5
  from pylint import lint
6
  from io import StringIO
 
 
 
 
 
7
 
8
- HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
9
  PROJECT_ROOT = "projects"
10
  AGENT_DIRECTORY = "agents"
11
 
@@ -18,11 +23,6 @@ if 'workspace_projects' not in st.session_state:
18
  st.session_state.workspace_projects = {}
19
  if 'available_agents' not in st.session_state:
20
  st.session_state.available_agents = []
21
- if 'current_state' not in st.session_state:
22
- st.session_state.current_state = {
23
- 'toolbox': {},
24
- 'workspace_chat': {}
25
- }
26
 
27
  class AIAgent:
28
  def __init__(self, name, description, skills):
@@ -44,27 +44,24 @@ I am confident that I can leverage my expertise to assist you in developing and
44
  """
45
  Autonomous build logic that continues based on the state of chat history and workspace projects.
46
  """
 
47
  summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
48
  summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
49
 
 
50
  next_step = "Based on the current state, the next logical step is to implement the main application logic."
51
 
52
  return summary, next_step
53
 
54
  def save_agent_to_file(agent):
55
- """Saves the agent's prompt to a file locally and then commits to the Hugging Face repository."""
56
  if not os.path.exists(AGENT_DIRECTORY):
57
  os.makedirs(AGENT_DIRECTORY)
58
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
59
- config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
60
  with open(file_path, "w") as file:
61
  file.write(agent.create_agent_prompt())
62
- with open(config_path, "w") as file:
63
- file.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
64
  st.session_state.available_agents.append(agent.name)
65
 
66
- commit_and_push_changes(f"Add agent {agent.name}")
67
-
68
  def load_agent_prompt(agent_name):
69
  """Loads an agent prompt from a file."""
70
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
@@ -107,135 +104,84 @@ def chat_interface_with_agent(input_text, agent_name):
107
 
108
  # Generate chatbot response
109
  outputs = model.generate(
110
- input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id # Set pad_token_id to eos_token_id
111
  )
112
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
113
  return response
114
 
115
- def workspace_interface(project_name):
116
- project_path = os.path.join(PROJECT_ROOT, project_name)
117
- if not os.path.exists(PROJECT_ROOT):
118
- os.makedirs(PROJECT_ROOT)
119
- if not os.path.exists(project_path):
120
- os.makedirs(project_path)
121
- st.session_state.workspace_projects[project_name] = {"files": []}
122
- st.session_state.current_state['workspace_chat']['project_name'] = project_name
123
- commit_and_push_changes(f"Create project {project_name}")
124
- return f"Project {project_name} created successfully."
125
- else:
126
- return f"Project {project_name} already exists."
127
-
128
- def add_code_to_workspace(project_name, code, file_name):
129
- project_path = os.path.join(PROJECT_ROOT, project_name)
130
- if os.path.exists(project_path):
131
- file_path = os.path.join(project_path, file_name)
132
- with open(file_path, "w") as file:
133
- file.write(code)
134
- st.session_state.workspace_projects[project_name]["files"].append(file_name)
135
- st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
136
- commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
137
- return f"Code added to {file_name} in project {project_name} successfully."
138
- else:
139
- return f"Project {project_name} does not exist."
140
-
141
  def terminal_interface(command, project_name=None):
142
  if project_name:
143
  project_path = os.path.join(PROJECT_ROOT, project_name)
144
- if not os.path.exists(project_path):
145
- return f"Project {project_name} does not exist."
146
- result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
147
  else:
148
  result = subprocess.run(command, shell=True, capture_output=True, text=True)
149
- if result.returncode == 0:
150
- st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
151
- return result.stdout
152
- else:
153
- st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
154
- return result.stderr
155
-
156
- # Chat interface using a selected agent
157
- def chat_interface_with_agent(input_text, agent_name):
158
- # ... [rest of the chat_interface_with_agent function] ...
159
 
 
 
 
 
 
 
 
160
 
 
161
  def summarize_text(text):
162
  summarizer = pipeline("summarization")
163
- summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
164
- st.session_state.current_state['toolbox']['summary'] = summary[0]['summary_text']
165
  return summary[0]['summary_text']
166
 
 
167
  def sentiment_analysis(text):
168
  analyzer = pipeline("sentiment-analysis")
169
- sentiment = analyzer(text)
170
- st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
171
- return sentiment[0]
172
-
173
- # ... [rest of the translate_code function, but remove the OpenAI API call and replace it with your own logic] ...
174
-
175
- def generate_code(code_idea):
176
- # Replace this with a call to a Hugging Face model or your own logic
177
- # For example, using a text-generation pipeline:
178
- generator = pipeline('text-generation', model='gpt2')
179
- generated_code = generator(code_idea, max_length=100, num_return_sequences=1)[0]['generated_text']
180
- st.session_state.current_state['toolbox']['generated_code'] = generated_code
181
- return generated_code
182
-
183
- def translate_code(code, input_language, output_language):
184
- # Define a dictionary to map programming languages to their corresponding file extensions
185
- language_extensions = {
186
-
187
- }
188
-
189
- # Add code to handle edge cases such as invalid input and unsupported programming languages
190
- if input_language not in language_extensions:
191
- raise ValueError(f"Invalid input language: {input_language}")
192
- if output_language not in language_extensions:
193
- raise ValueError(f"Invalid output language: {output_language}")
194
-
195
- # Use the dictionary to map the input and output languages to their corresponding file extensions
196
- input_extension = language_extensions[input_language]
197
- output_extension = language_extensions[output_language]
198
-
199
- # Translate the code using the OpenAI API
200
- prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
201
- response = openai.ChatCompletion.create(
202
- model="gpt-4",
203
- messages=[
204
- {"role": "system", "content": "You are an expert software developer."},
205
- {"role": "user", "content": prompt}
206
- ]
207
  )
208
- translated_code = response.choices[0].message['content'].strip()
209
-
210
- # Return the translated code
211
- translated_code = response.choices[0].message['content'].strip()
212
- st.session_state.current_state['toolbox']['translated_code'] = translated_code
213
- return translated_code
214
-
215
- def generate_code(code_idea):
216
- response = openai.ChatCompletion.create(
217
- model="gpt-4",
218
- messages=[
219
- {"role": "system", "content": "You are an expert software developer."},
220
- {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
221
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
  )
223
- generated_code = response.choices[0].message['content'].strip()
224
- st.session_state.current_state['toolbox']['generated_code'] = generated_code
225
- return generated_code
226
-
227
- def commit_and_push_changes(commit_message):
228
- """Commits and pushes changes to the Hugging Face repository."""
229
- commands = [
230
- "git add .",
231
- f"git commit -m '{commit_message}'",
232
- "git push"
233
- ]
234
- for command in commands:
235
- result = subprocess.run(command, shell=True, capture_output=True, text=True)
236
- if result.returncode != 0:
237
- st.error(f"Error executing command '{command}': {result.stderr}")
238
- break
239
 
240
  # Streamlit App
241
  st.title("AI Agent Creator")
@@ -264,12 +210,7 @@ elif app_mode == "Tool Box":
264
  st.subheader("Chat with CodeCraft")
265
  chat_input = st.text_area("Enter your message:")
266
  if st.button("Send"):
267
- if chat_input.startswith("@"):
268
- agent_name = chat_input.split(" ")[0][1:] # Extract agent_name from @agent_name
269
- chat_input = " ".join(chat_input.split(" ")[1:]) # Remove agent_name from input
270
- chat_response = chat_interface_with_agent(chat_input, agent_name)
271
- else:
272
- chat_response = chat_interface(chat_input)
273
  st.session_state.chat_history.append((chat_input, chat_response))
274
  st.write(f"CodeCraft: {chat_response}")
275
 
@@ -306,8 +247,8 @@ elif app_mode == "Tool Box":
306
  # Text Translation Tool (Code Translation)
307
  st.subheader("Translate Code")
308
  code_to_translate = st.text_area("Enter code to translate:")
309
- source_language = st.text_input("Enter source language (e.g. 'Python'):")
310
- target_language = st.text_input("Enter target language (e.g. 'JavaScript'):")
311
  if st.button("Translate Code"):
312
  translated_code = translate_code(code_to_translate, source_language, target_language)
313
  st.code(translated_code, language=target_language.lower())
@@ -319,20 +260,6 @@ elif app_mode == "Tool Box":
319
  generated_code = generate_code(code_idea)
320
  st.code(generated_code, language="python")
321
 
322
- # Display Preset Commands
323
- st.subheader("Preset Commands")
324
- preset_commands = {
325
- "Create a new project": "create_project('project_name')",
326
- "Add code to workspace": "add_code_to_workspace('project_name', 'code', 'file_name')",
327
- "Run terminal command": "terminal_interface('command', 'project_name')",
328
- "Generate code": "generate_code('code_idea')",
329
- "Summarize text": "summarize_text('text')",
330
- "Analyze sentiment": "sentiment_analysis('text')",
331
- "Translate code": "translate_code('code', 'source_language', 'target_language')",
332
- }
333
- for command_name, command in preset_commands.items():
334
- st.write(f"{command_name}: `{command}`")
335
-
336
  elif app_mode == "Workspace Chat App":
337
  # Workspace Chat App
338
  st.header("Workspace Chat App")
@@ -347,7 +274,7 @@ elif app_mode == "Workspace Chat App":
347
  # Add Code to Workspace
348
  st.subheader("Add Code to Workspace")
349
  code_to_add = st.text_area("Enter code to add to workspace:")
350
- file_name = st.text_input("Enter file name (e.g. 'app.py'):")
351
  if st.button("Add Code"):
352
  add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
353
  st.success(add_code_status)
@@ -403,8 +330,4 @@ elif app_mode == "Workspace Chat App":
403
  st.write("Autonomous Build Summary:")
404
  st.write(summary)
405
  st.write("Next Step:")
406
- st.write(next_step)
407
-
408
- # Display current state for debugging
409
- st.sidebar.subheader("Current State")
410
- st.sidebar.json(st.session_state.current_state)
 
 
1
  import streamlit as st
2
+ import os
3
+ import subprocess
4
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
5
  import black
6
  from pylint import lint
7
  from io import StringIO
8
+ import openai
9
+ import sys
10
+
11
+ # Set your OpenAI API key here
12
+ openai.api_key = "YOUR_OPENAI_API_KEY"
13
 
 
14
  PROJECT_ROOT = "projects"
15
  AGENT_DIRECTORY = "agents"
16
 
 
23
  st.session_state.workspace_projects = {}
24
  if 'available_agents' not in st.session_state:
25
  st.session_state.available_agents = []
 
 
 
 
 
26
 
27
  class AIAgent:
28
  def __init__(self, name, description, skills):
 
44
  """
45
  Autonomous build logic that continues based on the state of chat history and workspace projects.
46
  """
47
+ # Example logic: Generate a summary of chat history and workspace state
48
  summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
49
  summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
50
 
51
+ # Example: Generate the next logical step in the project
52
  next_step = "Based on the current state, the next logical step is to implement the main application logic."
53
 
54
  return summary, next_step
55
 
56
  def save_agent_to_file(agent):
57
+ """Saves the agent's prompt to a file."""
58
  if not os.path.exists(AGENT_DIRECTORY):
59
  os.makedirs(AGENT_DIRECTORY)
60
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
 
61
  with open(file_path, "w") as file:
62
  file.write(agent.create_agent_prompt())
 
 
63
  st.session_state.available_agents.append(agent.name)
64
 
 
 
65
  def load_agent_prompt(agent_name):
66
  """Loads an agent prompt from a file."""
67
  file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
 
104
 
105
  # Generate chatbot response
106
  outputs = model.generate(
107
+ input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True
108
  )
109
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
110
  return response
111
 
112
+ # Terminal interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  def terminal_interface(command, project_name=None):
114
  if project_name:
115
  project_path = os.path.join(PROJECT_ROOT, project_name)
116
+ result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_path)
 
 
117
  else:
118
  result = subprocess.run(command, shell=True, capture_output=True, text=True)
119
+ return result.stdout
 
 
 
 
 
 
 
 
 
120
 
121
+ # Code editor interface
122
+ def code_editor_interface(code):
123
+ formatted_code = black.format_str(code, mode=black.FileMode())
124
+ pylint_output = lint.Run([formatted_code], do_exit=False)
125
+ pylint_output_str = StringIO()
126
+ pylint_output.linter.reporter.write_messages(pylint_output_str)
127
+ return formatted_code, pylint_output_str.getvalue()
128
 
129
+ # Text summarization tool
130
  def summarize_text(text):
131
  summarizer = pipeline("summarization")
132
+ summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
 
133
  return summary[0]['summary_text']
134
 
135
+ # Sentiment analysis tool
136
  def sentiment_analysis(text):
137
  analyzer = pipeline("sentiment-analysis")
138
+ result = analyzer(text)
139
+ return result[0]['label']
140
+
141
+ # Text translation tool (code translation)
142
+ def translate_code(code, source_language, target_language):
143
+ # Placeholder for translation logic
144
+ return f"Translated {source_language} code to {target_language}."
145
+
146
+ # Code generation tool
147
+ def generate_code(idea):
148
+ response = openai.Completion.create(
149
+ engine="davinci-codex",
150
+ prompt=idea,
151
+ max_tokens=150
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
152
  )
153
+ return response.choices[0].text.strip()
154
+
155
+ # Workspace interface
156
+ def workspace_interface(project_name):
157
+ project_path = os.path.join(PROJECT_ROOT, project_name)
158
+ if not os.path.exists(project_path):
159
+ os.makedirs(project_path)
160
+ st.session_state.workspace_projects[project_name] = {'files': []}
161
+ return f"Project '{project_name}' created successfully."
162
+ else:
163
+ return f"Project '{project_name}' already exists."
164
+
165
+ # Add code to workspace
166
+ def add_code_to_workspace(project_name, code, file_name):
167
+ project_path = os.path.join(PROJECT_ROOT, project_name)
168
+ if not os.path.exists(project_path):
169
+ return f"Project '{project_name}' does not exist."
170
+
171
+ file_path = os.path.join(project_path, file_name)
172
+ with open(file_path, "w") as file:
173
+ file.write(code)
174
+ st.session_state.workspace_projects[project_name]['files'].append(file_name)
175
+ return f"Code added to '{file_name}' in project '{project_name}'."
176
+
177
+ # Chat interface
178
+ def chat_interface(input_text):
179
+ response = openai.Completion.create(
180
+ engine="davinci-codex",
181
+ prompt=input_text,
182
+ max_tokens=150
183
  )
184
+ return response.choices[0].text.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
  # Streamlit App
187
  st.title("AI Agent Creator")
 
210
  st.subheader("Chat with CodeCraft")
211
  chat_input = st.text_area("Enter your message:")
212
  if st.button("Send"):
213
+ chat_response = chat_interface(chat_input)
 
 
 
 
 
214
  st.session_state.chat_history.append((chat_input, chat_response))
215
  st.write(f"CodeCraft: {chat_response}")
216
 
 
247
  # Text Translation Tool (Code Translation)
248
  st.subheader("Translate Code")
249
  code_to_translate = st.text_area("Enter code to translate:")
250
+ source_language = st.text_input("Enter source language (e.g., 'Python'):")
251
+ target_language = st.text_input("Enter target language (e.g., 'JavaScript'):")
252
  if st.button("Translate Code"):
253
  translated_code = translate_code(code_to_translate, source_language, target_language)
254
  st.code(translated_code, language=target_language.lower())
 
260
  generated_code = generate_code(code_idea)
261
  st.code(generated_code, language="python")
262
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
263
  elif app_mode == "Workspace Chat App":
264
  # Workspace Chat App
265
  st.header("Workspace Chat App")
 
274
  # Add Code to Workspace
275
  st.subheader("Add Code to Workspace")
276
  code_to_add = st.text_area("Enter code to add to workspace:")
277
+ file_name = st.text_input("Enter file name (e.g., 'app.py'):")
278
  if st.button("Add Code"):
279
  add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
280
  st.success(add_code_status)
 
330
  st.write("Autonomous Build Summary:")
331
  st.write(summary)
332
  st.write("Next Step:")
333
+ st.write(next_step)