acecalisto3 committed on
Commit b5504a7 · verified · 1 Parent(s): 5ae4817

Update app.py

Files changed (1)
  1. app.py +473 -279
app.py CHANGED
@@ -1,287 +1,481 @@
- import os
  import subprocess
- import random
- from huggingface_hub import InferenceClient
- import gradio as gr
- from safe_search import safe_search
- from i_search import google
- from i_search import i_search as i_s
- from agent import ( run_agent, create_interface, format_prompt_var, generate, MAX_HISTORY, client, VERBOSE, date_time_str, )
-
- from utils import parse_action, parse_file_content, read_python_module_structure
- from datetime import datetime
-
- now = datetime.now()
- date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
-
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
- VERBOSE = True
- MAX_HISTORY = 100
-
- def format_prompt_var(message, history):
-     prompt = " "
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/usr]\n{bot_response}\n"
-     prompt += f"[INST] {message} [/usr]\n"
-     return prompt
-
- def run_gpt(prompt_template, stop_tokens, max_tokens, purpose, **prompt_kwargs):
-     seed = random.randint(1, 1111111111111111)
-     print(seed)
-     generate_kwargs = dict(
-         temperature=1.0,
-         max_new_tokens=2096,
-         top_p=0.99,
-         repetition_penalty=1.0,
-         do_sample=True,
-         seed=seed,
-     )
-
-     content = PREFIX.format(
-         date_time_str=date_time_str,
-         purpose=purpose,
-         safe_search=safe_search,
-     ) + prompt_template.format(**prompt_kwargs)
-     if VERBOSE:
-         print(LOG_PROMPT.format(content))
-
-     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     resp = ""
-     for response in stream:
-         resp += response.token.text
-
-     if VERBOSE:
-         print(LOG_RESPONSE.format(resp))
-     return resp
- def compress_history(purpose, task, history, directory):
-     resp = run_gpt(
-         COMPRESS_HISTORY_PROMPT,
-         stop_tokens=["observation:", "task:", "action:", "thought:"],
-         max_tokens=512,
-         purpose=purpose,
-         task=task,
-         history=history,
-     )
-     history = "observation: {}\n".format(resp)
-     return history

- def call_search(purpose, task, history, directory, action_input):
-     print("CALLING SEARCH")
      try:
-         if "http" in action_input:
-             if "<" in action_input:
-                 action_input = action_input.strip("<")
-             if ">" in action_input:
-                 action_input = action_input.strip(">")
-             response = i_s(action_input)
-             print(response)
-             history += "observation: search result is: {}\n".format(response)
-         else:
-             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
      except Exception as e:
-         history += "{}\n".format(e)  # Fixing this line to include the exception message
-     if "COMPLETE" in action_name or "COMPLETE" in action_input:
-         task = "END"
-     return action_name, action_input, history, task
- def call_set_task(purpose, task, history, directory, action_input):
-     task = run_gpt(
-         TASK_PROMPT,
-         stop_tokens=[],
-         max_tokens=64,
-         purpose=purpose,
-         task=task,
-         history=history,
-     ).strip("\n")
-     history += "observation: task has been updated to: {}\n".format(task)
-     return "MAIN", None, history, task
-
- def end_fn(purpose, task, history, directory, action_input):
-     task = "END"
-     return "COMPLETE", "COMPLETE", history, task
-
- EXAMPLE_PROJECT_DIRECTORY = './example_project/'
-
- PREFIX = """Answer the following question as accurately as possible, providing detailed responses that cover each aspect of the topic. Make sure to maintain a professional tone throughout your answers. Also please make sure to meet the safety criteria specified earlier. Question: What are the suggested approaches for creating a responsive navigation bar? Answer:"""
- LOG_PROMPT = "Prompt: {}"
- LOG_RESPONSE = "Response: {}"
- COMPRESS_HISTORY_PROMPT = """Given the context history, compress it down to something meaningful yet short enough to fit into a single chat message without exceeding over 512 tokens. Context: {}"""
- TASK_PROMPT = """Determine the correct next step in terms of actions, thoughts or observations for the following task: {}, current history: {}, current directory: {}."""
-
- NAME_TO_FUNC = {
-     "MAIN": call_main,
-     "UPDATE-TASK": call_set_task,
-     "SEARCH": call_search,
-     "COMPLETE": end_fn,
- }
-
- def _clean_up():
-     if os.path.exists(EXAMPLE_PROJECT_DIRECTORY):
-         shutil.rmtree(EXAMPLE_PROJECT_DIRECTORY)
-
- def call_main(purpose, task, history, directory, action_input=''):
-     _clean_up()
-     os.makedirs(EXAMPLE_PROJECT_DIRECTORY)
-     template = '''<!DOCTYPE html>
- <html lang="en">
- <head>
- <meta charset="UTF-8">
- <meta http-equiv="X-UA-Compatible" content="IE=edge">
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
- <title>Document</title>
- <style>
- {{%style}}
- </style>
- </head>
- <body>
- {{%body}}
- </body>
- </html>'''
-
-     navbar = f'''<nav>
- <input type="checkbox" id="check">
- <label for="check" class="checkbtn">
- <i class="fas fa-bars"></i>
- </label>
- <label class="logo">LOGO</label>
- <ul>
- <li><a href="#home">Home</a></li>
- <li><a href="#about">About Us</a></li>
- <li><a href="#services">Services</a></li>
- <li><a href="#contact">Contact Us</a></li>
- </ul>
- </nav>'''
-
-     css = '''*{
- box-sizing: border-box;}
-
- body {{
- font-family: sans-serif;
- margin: 0;
- padding: 0;
- background: #f4f4f4;
- }}
-
- /* Navigation */
- nav {{
- position: fixed;
- width: 100%;
- height: 70px;
- line-height: 70px;
- z-index: 999;
- transition: all .6s ease-in-out;
- }}
-
- nav ul {{
- float: right;
- margin-right: 40px;
- display: flex;
- justify-content: space-between;
- align-items: center;
- list-style: none;
- }}
-
- nav li {{
- position: relative;
- text-transform: uppercase;
- letter-spacing: 2px;
- cursor: pointer;
- padding: 0 10px;
- }}
-
- nav li:hover > ul {{
- visibility: visible;
- opacity: 1;
- transform: translateY(0);
- top: auto;
- left:auto;
- -webkit-transition:all 0.3s linear; /* Safari/Chrome/Opera/Gecko */
- -moz-transition:all 0.3s linear; /* FF3.6+ */
- -ms-transition:all 0.3s linear; /* IE10 */
- -o-transition:all 0.3s linear; /* Opera 10.5–12.00 */
- transition:all 0.3s linear;
- }}
-
- nav ul ul {{
- visibility: hidden;
- opacity: 0;
- min-width: 180px;
- white-space: nowrap;
- background: rgba(255, 255, 255, 0.9);
- box-shadow: 0px 0px 3px rgba(0, 0, 0, 0.2);
- border-radius: 0px;
- transition: all 0.5s cubic-bezier(0.770, 0.000, 0.175, 1.000);
- position: absolute;
- top: 100%;
- left: 0;
- z-index: 9999;
- padding: 0;
- }}'''
-
-     with open(os.path.join(EXAMPLE_PROJECT_DIRECTORY, 'index.html'), 'w') as f:
-         f.write(template.format(body=navbar, style=css))
-
-     return "MAIN", "", f"Created a responsive navigation bar in:\n{EXAMPLE_PROJECT_DIRECTORY}", task
-
- def run_action(purpose, task, history, directory, action_name, action_input):
-     print(f'action_name::{action_name}')
      try:
-         if "RESPONSE" in action_name or "COMPLETE" in action_name:
-             action_name = "COMPLETE"
-             task = "END"
-             return action_name, "COMPLETE", history, task
-
-         if len(history.split('\n')) > MAX_HISTORY:
-             if VERBOSE:
-                 print("COMPRESSING HISTORY")
-             history = compress_history(purpose, task, history, directory)
-         if not action_name in NAME_TO_FUNC:
-             action_name = "MAIN"
-         if action_name == '' or action_name is None:
-             action_name = "MAIN"
-         assert action_name in NAME_TO_FUNC
-
-         print("RUN: ", action_name, action_input)
-         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
      except Exception as e:
-         history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
-         return "MAIN", None, history, task
- def run(purpose, history):
-     task = None
-     directory = "./"
-     if history:
-         history = str(history).strip("[]")
-     if not history:
-         history = ""
-
-     action_name = "UPDATE-TASK" if task is None else "MAIN"
-     action_input = None
-     while True:
-         print("")
-         print("")
-         print("---")
-         print("purpose:", purpose)
-         print("task:", task)
-         print("---")
-         print(history)
-         print("---")
-
-         action_name, action_input, history, task = run_action(
-             purpose,
-             task,
-             history,
-             directory,
-             action_name,
-             action_input,
-         )
-         yield (history)
-         if task == "END":
-             return (history)
- iface = gr.Interface(fn=run, inputs=["text", "text"], outputs="text", title="Expert Web Developer Assistant Agent", description="Ask me questions, give me tasks, and I will respond accordingly.\n Example: 'Purpose: Create a contact form | Action: FORMAT INPUT' & Input: '<form><div><label for='email'>Email:</label><input type='email'/></div></form>' ")

-
- # Launch the Gradio interface
- iface.launch(share=True)
-
- if __name__ == "__main__":
-     main("Sample Purpose", "Sample History")
+ import streamlit as st
  import subprocess
+ import os
+ import tempfile
+ from io import StringIO
+ import sys
+ import black
+ from pylint import lint
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ # Global state to manage communication between Tool Box and Workspace Chat App
+ if 'chat_history' not in st.session_state:
+     st.session_state.chat_history = []
+ if 'terminal_history' not in st.session_state:
+     st.session_state.terminal_history = []
+ if 'workspace_projects' not in st.session_state:
+     st.session_state.workspace_projects = {}
+ if 'available_agents' not in st.session_state:
+     st.session_state.available_agents = []
+
+ class AIAgent:
+     def __init__(self, name, description, skills):
+         self.name = name
+         self.description = description
+         self.skills = skills
+
+     def create_agent_prompt(self):
+         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
+         agent_prompt = f"""
+ I am an AI agent named {self.name}, designed to assist developers with their projects.
+ My expertise lies in the following areas:
+
+ {skills_str}
+
+ I am here to help you build, deploy, and improve your applications.
+ Feel free to ask me any questions or present me with any challenges you encounter.
+ I will do my best to provide helpful and insightful responses.
+ """
+         return agent_prompt
+
+     def autonomous_build(self, chat_history, workspace_projects):
+         """
+         Autonomous build logic that continues based on the state of chat history and workspace projects.
+         """
+         # Example logic: Generate a summary of chat history and workspace state
+         summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
+         summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
+
+         # Example: Generate the next logical step in the project
+         next_step = "Based on the current state, the next logical step is to implement the main application logic."
+
+         return summary, next_step
+
+ def save_agent_to_file(agent):
+     """Saves the agent's prompt to a file."""
+     if not os.path.exists("agents"):
+         os.makedirs("agents")
+     file_path = os.path.join("agents", f"{agent.name}.txt")
+     with open(file_path, "w") as file:
+         file.write(agent.create_agent_prompt())
+     st.session_state.available_agents.append(agent.name)
+
+ def load_agent_prompt(agent_name):
+     """Loads an agent prompt from a file."""
+     file_path = os.path.join("agents", f"{agent_name}.txt")
+     if os.path.exists(file_path):
+         with open(file_path, "r") as file:
+             agent_prompt = file.read()
+         return agent_prompt
+     else:
+         return None
+
+ def create_agent_from_text(name, text):
+     skills = text.split('\n')
+     agent = AIAgent(name, "AI agent created from text input.", skills)
+     save_agent_to_file(agent)
+     return agent.create_agent_prompt()
+
+ # Chat interface using a selected agent
+ def chat_interface_with_agent(input_text, agent_name):
+     agent_prompt = load_agent_prompt(agent_name)
+     if agent_prompt is None:
+         return f"Agent {agent_name} not found."
+
+     # Load the GPT-2 model which is compatible with AutoModelForCausalLM
+     model_name = "gpt2"
+     try:
+         model = AutoModelForCausalLM.from_pretrained(model_name)
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+     except EnvironmentError as e:
+         return f"Error loading model: {e}"
+
+     # Combine the agent prompt with user input
+     combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
+
+     # Truncate input text to avoid exceeding the model's maximum length
+     max_input_length = 900
+     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
+     if input_ids.shape[1] > max_input_length:
+         input_ids = input_ids[:, :max_input_length]
+
+     outputs = model.generate(input_ids, max_length=1024, do_sample=True)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+ # Define functions for each feature
+
+ # 1. Chat Interface
+ def chat_interface(input_text):
+     """Handles user input in the chat interface.
+
+     Args:
+         input_text: User's input text.
+
+     Returns:
+         The chatbot's response.
+     """
+     # Load the GPT-2 model which is compatible with AutoModelForCausalLM
+     model_name = "gpt2"
+     try:
+         model = AutoModelForCausalLM.from_pretrained(model_name)
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+     except EnvironmentError as e:
+         return f"Error loading model: {e}"
+
+     # Truncate input text to avoid exceeding the model's maximum length
+     max_input_length = 900
+     input_ids = tokenizer.encode(input_text, return_tensors="pt")
+     if input_ids.shape[1] > max_input_length:
+         input_ids = input_ids[:, :max_input_length]
+
+     outputs = model.generate(input_ids, max_length=1024, do_sample=True)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+ # 2. Terminal
141
+ def terminal_interface(command, project_name=None):
142
+ """Executes commands in the terminal.
143
+
144
+ Args:
145
+ command: User's command.
146
+ project_name: Name of the project workspace to add installed packages.
147
+
148
+ Returns:
149
+ The terminal output.
150
+ """
151
+ # Execute command
152
  try:
153
+ process = subprocess.run(command.split(), capture_output=True, text=True)
154
+ output = process.stdout
155
+
156
+ # If the command is to install a package, update the workspace
157
+ if "install" in command and project_name:
158
+ requirements_path = os.path.join("projects", project_name, "requirements.txt")
159
+ with open(requirements_path, "a") as req_file:
160
+ package_name = command.split()[-1]
161
+ req_file.write(f"{package_name}\n")
 
162
  except Exception as e:
163
+ output = f"Error: {e}"
164
+ return output
165
+
166
+
+ # 3. Code Editor
+ def code_editor_interface(code):
+     """Provides code completion, formatting, and linting in the code editor.
+
+     Args:
+         code: User's code.
+
+     Returns:
+         Formatted and linted code.
+     """
+     # Format code using black
+     try:
+         formatted_code = black.format_str(code, mode=black.FileMode())
+     except black.InvalidInput:
+         formatted_code = code  # Keep original code if formatting fails
+
+     # Lint code using pylint; lint.Run takes file paths, so write the
+     # formatted code to a temporary file first
      try:
+         pylint_output = StringIO()
+         sys.stdout = pylint_output
+         sys.stderr = pylint_output
+         with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as tmp_file:
+             tmp_file.write(formatted_code)
+         lint.Run([tmp_file.name], exit=False)
+         sys.stdout = sys.__stdout__
+         sys.stderr = sys.__stderr__
+         os.remove(tmp_file.name)
+         lint_message = pylint_output.getvalue()
      except Exception as e:
+         sys.stdout = sys.__stdout__
+         sys.stderr = sys.__stderr__
+         lint_message = f"Pylint error: {e}"

+     return formatted_code, lint_message
+
+ # 4. Workspace
+ def workspace_interface(project_name):
+     """Manages projects, files, and resources in the workspace.
+
+     Args:
+         project_name: Name of the new project.
+
+     Returns:
+         Project creation status.
+     """
+     project_path = os.path.join("projects", project_name)
+     # Create project directory
+     try:
+         os.makedirs(project_path)
+         requirements_path = os.path.join(project_path, "requirements.txt")
+         with open(requirements_path, "w") as req_file:
+             req_file.write("")  # Initialize an empty requirements.txt file
+         status = f'Project "{project_name}" created successfully.'
+         st.session_state.workspace_projects[project_name] = {'files': []}
+     except FileExistsError:
+         status = f'Project "{project_name}" already exists.'
+     return status
+
+ def add_code_to_workspace(project_name, code, file_name):
+     """Adds selected code files to the workspace.
+
+     Args:
+         project_name: Name of the project.
+         code: Code to be added.
+         file_name: Name of the file to be created.
+
+     Returns:
+         File creation status.
+     """
+     project_path = os.path.join("projects", project_name)
+     file_path = os.path.join(project_path, file_name)
+
+     try:
+         with open(file_path, "w") as code_file:
+             code_file.write(code)
+         status = f'File "{file_name}" added to project "{project_name}" successfully.'
+         st.session_state.workspace_projects[project_name]['files'].append(file_name)
+     except Exception as e:
+         status = f"Error: {e}"
+     return status
+
+ # 5. AI-Infused Tools
+
+ # Define custom AI-powered tools using Hugging Face models
+
+ # Example: Text summarization tool
+ def summarize_text(text):
+     """Summarizes a given text using a Hugging Face model.
+
+     Args:
+         text: Text to be summarized.
+
+     Returns:
+         Summarized text.
+     """
+     # Load the summarization model
+     model_name = "facebook/bart-large-cnn"
+     try:
+         summarizer = pipeline("summarization", model=model_name)
+     except EnvironmentError as e:
+         return f"Error loading model: {e}"
+
+     # Truncate input text to avoid exceeding the model's maximum length
+     max_input_length = 1024
+     inputs = text
+     if len(text) > max_input_length:
+         inputs = text[:max_input_length]
+
+     # Generate summary
+     summary = summarizer(inputs, max_length=100, min_length=30, do_sample=False)[0][
+         "summary_text"
+     ]
+     return summary
+
+ # Example: Sentiment analysis tool
+ def sentiment_analysis(text):
+     """Performs sentiment analysis on a given text using a Hugging Face model.
+
+     Args:
+         text: Text to be analyzed.
+
+     Returns:
+         Sentiment analysis result.
+     """
+     # Load the sentiment analysis model
+     model_name = "distilbert-base-uncased-finetuned-sst-2-english"
+     try:
+         analyzer = pipeline("sentiment-analysis", model=model_name)
+     except EnvironmentError as e:
+         return f"Error loading model: {e}"
+
+     # Perform sentiment analysis
+     result = analyzer(text)[0]
+     return result
+
+ # Example: Text translation tool (code translation)
+ def translate_code(code, source_language, target_language):
+     """Translates code from one programming language to another using OpenAI Codex.
+
+     Args:
+         code: Code to be translated.
+         source_language: The source programming language.
+         target_language: The target programming language.
+
+     Returns:
+         Translated code.
+     """
+     # You might want to replace this with a Hugging Face translation model,
+     # for example, "Helsinki-NLP/opus-mt-en-fr".
+     # Refer to Hugging Face documentation for model usage.
+     prompt = f"Translate the following {source_language} code to {target_language}:\n\n{code}"
+     try:
+         # Use a Hugging Face translation model instead of OpenAI Codex
+         # ...
+         translated_code = "Translated code"  # Replace with actual translation
+     except Exception as e:
+         translated_code = f"Error: {e}"
+     return translated_code
+
+ # 6. Code Generation
+ def generate_code(idea):
+     """Generates code based on a given idea using the EleutherAI/gpt-neo-2.7B model.
+
+     Args:
+         idea: The idea for the code to be generated.
+
+     Returns:
+         The generated code as a string.
+     """
+     # Load the code generation model
+     model_name = "EleutherAI/gpt-neo-2.7B"
+     try:
+         model = AutoModelForCausalLM.from_pretrained(model_name)
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+     except EnvironmentError as e:
+         return f"Error loading model: {e}"
+
+     # Generate the code
+     input_text = f"""
+ # Idea: {idea}
+ # Code:
+ """
+     input_ids = tokenizer.encode(input_text, return_tensors="pt")
+     output_sequences = model.generate(
+         input_ids=input_ids,
+         max_length=1024,
+         num_return_sequences=1,
+         no_repeat_ngram_size=2,
+         early_stopping=True,
+         do_sample=True,  # Sampling must be enabled for temperature/top_k to take effect
+         temperature=0.7,  # Adjust temperature for creativity
+         top_k=50,  # Adjust top_k for diversity
+     )
+     generated_code = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
+
+     # Remove the prompt and formatting
+     parts = generated_code.split("\n# Code:")
+     if len(parts) > 1:
+         generated_code = parts[1].strip()
+     else:
+         generated_code = generated_code.strip()
+
+     return generated_code
+
+ # 7. AI Personas Creator
+ def create_persona_from_text(text):
+     """Creates an AI persona from the given text.
+
+     Args:
+         text: Text to be used for creating the persona.
+
+     Returns:
+         Persona prompt.
+     """
+     persona_prompt = f"""
+ As an elite expert developer with the highest level of proficiency in Streamlit, Gradio, and Hugging Face, I possess a comprehensive understanding of these technologies and their applications in web development and deployment. My expertise encompasses the following areas:
+
+ Streamlit:
+ * In-depth knowledge of Streamlit's architecture, components, and customization options.
+ * Expertise in creating interactive and user-friendly dashboards and applications.
+ * Proficiency in integrating Streamlit with various data sources and machine learning models.
+
+ Gradio:
+ * Thorough understanding of Gradio's capabilities for building and deploying machine learning interfaces.
+ * Expertise in creating custom Gradio components and integrating them with Streamlit applications.
+ * Proficiency in using Gradio to deploy models from Hugging Face and other frameworks.
+
+ Hugging Face:
+ * Comprehensive knowledge of Hugging Face's model hub and Transformers library.
+ * Expertise in fine-tuning and deploying Hugging Face models for various NLP and computer vision tasks.
+ * Proficiency in using Hugging Face's Spaces platform for model deployment and sharing.
+
+ Deployment:
+ * In-depth understanding of best practices for deploying Streamlit and Gradio applications.
+ * Expertise in deploying models on cloud platforms such as AWS, Azure, and GCP.
+ * Proficiency in optimizing deployment configurations for performance and scalability.
+
+ Additional Skills:
+ * Strong programming skills in Python and JavaScript.
+ * Familiarity with Docker and containerization technologies.
+ * Excellent communication and problem-solving abilities.
+
+ I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications using Streamlit, Gradio, and Hugging Face. Please feel free to ask any questions or present any challenges you may encounter.
+
+ Example:
+
+ Task:
+ Develop a Streamlit application that allows users to generate text using a Hugging Face model. The application should include a Gradio component for user input and model prediction.
+
+ Solution:
+
+ import streamlit as st
+ import gradio as gr
+ from transformers import pipeline
+
+ # Create a Hugging Face pipeline
+ huggingface_model = pipeline("text-generation")
+
+ # Create a Streamlit app
+ st.title("Hugging Face Text Generation App")
+
+ # Define a Gradio component
+ demo = gr.Interface(
+     fn=huggingface_model,
+     inputs=gr.Textbox(lines=2),
+     outputs=gr.Textbox(lines=1),
+ )
+
+ # Display the Gradio component in the Streamlit app
+ st.write(demo)
+ """
+     return persona_prompt
+
+ # Streamlit App
+ st.title("AI Agent Creator")
+
+ # Sidebar navigation
+ st.sidebar.title("Navigation")
+ app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+
+ if app_mode == "AI Agent Creator":
+     # AI Agent Creator
+     st.header("Create an AI Agent from Text")
+
+     st.subheader("From Text")
+     agent_name = st.text_input("Enter agent name:")
+     text_input = st.text_area("Enter skills (one per line):")
+     if st.button("Create Agent"):
+         agent_prompt = create_agent_from_text(agent_name, text_input)
+         st.success(f"Agent '{agent_name}' created and saved successfully.")
+         # save_agent_to_file (called via create_agent_from_text) already
+         # registers the agent in st.session_state.available_agents
+
+ elif app_mode == "Tool Box":
+     # Tool Box
+     for project, details in st.session_state.workspace_projects.items():
+         st.write(f"Project: {project}")
+         for file in details['files']:
+             st.write(f" - {file}")
+
+     # Chat with AI Agents
+     st.subheader("Chat with AI Agents")
+     selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
+     agent_chat_input = st.text_area("Enter your message for the agent:")
+     if st.button("Send to Agent"):
+         agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
+         st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
+         st.write(f"{selected_agent}: {agent_chat_response}")
+
+     # Automate Build Process
+     st.subheader("Automate Build Process")
+     if st.button("Automate"):
+         agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
+         summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
+         st.write("Autonomous Build Summary:")
+         st.write(summary)
+         st.write("Next Step:")
+         st.write(next_step)
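
The new interface is launched with "streamlit run app.py". As a quick way to sanity-check the model plumbing outside Streamlit, here is a minimal standalone sketch (an editor's example, not part of the commit) of the truncate-then-generate pattern that chat_interface and chat_interface_with_agent share; it assumes the transformers and torch packages are installed, and the file name is hypothetical:

    # generate_smoke_test.py -- hypothetical helper, mirrors chat_interface() in app.py
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model = AutoModelForCausalLM.from_pretrained("gpt2")
    tokenizer = AutoTokenizer.from_pretrained("gpt2")

    prompt = "User: How do I build a responsive navigation bar?\nAgent:"
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    input_ids = input_ids[:, :900]  # same truncation cap app.py applies before generating

    outputs = model.generate(input_ids, max_length=1024, do_sample=True)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))

For the translate_code stub, the in-code comments point toward Hugging Face translation models; a sketch of that route (again an editor's assumption) looks like the following, with the caveat that Opus-MT checkpoints such as Helsinki-NLP/opus-mt-en-fr are trained on natural language, not source code:

    from transformers import pipeline

    # Translating natural-language text (for example, code comments) works;
    # translating the code itself is beyond what these checkpoints are trained for.
    translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
    print(translator("Create a responsive navigation bar.")[0]["translation_text"])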